X86ISelLowering.cpp revision b2f1b5028c03cd16c2452e81544d2da51fb373f2
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> UseRegMask("x86-use-regmask",
                                cl::desc("Use register masks for x86 calls"));

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// Generate a DAG to grab 128 bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  int Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::UNDEF, dl, ResultVT);

  if (isa<ConstantSDNode>(Idx)) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
    // that we can match to VEXTRACTF128.
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
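    // Illustrative example (added commentary, not in the original source):
    // extracting element 5 of a v8i32 gives ElemsPerChunk = 128/32 = 4 and
    // NormalizedIdxVal = ((5*32)/128)*4 = 4, i.e. the subvector extract
    // starts at the first element of the upper 128-bit half.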
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                                 VecIdx);

    return Result;
  }

  return SDValue();
}

/// Generate a DAG to put 128 bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl) {
  if (isa<ConstantSDNode>(Idx)) {
    EVT VT = Vec.getValueType();
    assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");

    EVT ElVT = VT.getVectorElementType();
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    EVT ResultVT = Result.getValueType();

    // Insert the relevant 128 bits.
    unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                         VecIdx);
    return Result;
  }

  return SDValue();
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird; it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger.  It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register-pressure-specific scheduling.
  if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up Windows compiler runtime calls.
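    // (Added note, not in the original: the libcalls configured below map
    // LLVM's 64-bit integer divide/remainder/multiply and FP-to-u64
    // conversions onto MSVC CRT helpers such as _alldiv, _aullrem and _ftol2,
    // since 32-bit x86 has no single instruction for these operations.)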
190 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 191 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 192 setLibcallName(RTLIB::SREM_I64, "_allrem"); 193 setLibcallName(RTLIB::UREM_I64, "_aullrem"); 194 setLibcallName(RTLIB::MUL_I64, "_allmul"); 195 setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2"); 196 setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2"); 197 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 198 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 199 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall); 200 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall); 201 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall); 202 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C); 203 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C); 204 } 205 206 if (Subtarget->isTargetDarwin()) { 207 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 208 setUseUnderscoreSetJmp(false); 209 setUseUnderscoreLongJmp(false); 210 } else if (Subtarget->isTargetMingw()) { 211 // MS runtime is weird: it exports _setjmp, but longjmp! 212 setUseUnderscoreSetJmp(true); 213 setUseUnderscoreLongJmp(false); 214 } else { 215 setUseUnderscoreSetJmp(true); 216 setUseUnderscoreLongJmp(true); 217 } 218 219 // Set up the register classes. 220 addRegisterClass(MVT::i8, X86::GR8RegisterClass); 221 addRegisterClass(MVT::i16, X86::GR16RegisterClass); 222 addRegisterClass(MVT::i32, X86::GR32RegisterClass); 223 if (Subtarget->is64Bit()) 224 addRegisterClass(MVT::i64, X86::GR64RegisterClass); 225 226 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 227 228 // We don't accept any truncstore of integer registers. 229 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 230 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 231 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 232 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 233 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 234 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 235 236 // SETOEQ and SETUNE require checking two conditions. 237 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 238 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 239 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 240 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 241 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 242 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 243 244 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 245 // operation. 246 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 247 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 248 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 249 250 if (Subtarget->is64Bit()) { 251 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 252 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 253 } else if (!TM.Options.UseSoftFloat) { 254 // We have an algorithm for SSE2->double, and we turn this into a 255 // 64-bit FILD followed by conditional FADD for other targets. 256 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 257 // We have an algorithm for SSE2, and we turn this into a 64-bit 258 // FILD for other targets. 259 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 260 } 261 262 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 263 // this operation. 
264 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 265 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 266 267 if (!TM.Options.UseSoftFloat) { 268 // SSE has no i16 to fp conversion, only i32 269 if (X86ScalarSSEf32) { 270 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 271 // f32 and f64 cases are Legal, f80 case is not 272 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 273 } else { 274 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 275 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 276 } 277 } else { 278 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 279 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 280 } 281 282 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 283 // are Legal, f80 is custom lowered. 284 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 285 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 286 287 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 288 // this operation. 289 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 290 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 291 292 if (X86ScalarSSEf32) { 293 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 294 // f32 and f64 cases are Legal, f80 case is not 295 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 296 } else { 297 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 298 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 299 } 300 301 // Handle FP_TO_UINT by promoting the destination to a larger signed 302 // conversion. 303 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 304 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 305 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 306 307 if (Subtarget->is64Bit()) { 308 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 309 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 310 } else if (!TM.Options.UseSoftFloat) { 311 // Since AVX is a superset of SSE3, only check for SSE here. 312 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 313 // Expand FP_TO_UINT into a select. 314 // FIXME: We would like to use a Custom expander here eventually to do 315 // the optimal thing for SSE vs. the default expansion in the legalizer. 316 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 317 else 318 // With SSE3 we can use fisttpll to convert to a signed i64; without 319 // SSE, we're stuck with a fistpll. 320 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 321 } 322 323 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 324 if (!X86ScalarSSEf64) { 325 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 326 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 327 if (Subtarget->is64Bit()) { 328 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 329 // Without SSE, i64->f64 goes through memory. 330 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 331 } 332 } 333 334 // Scalar integer divide and remainder are lowered to use operations that 335 // produce two results, to match the available instructions. This exposes 336 // the two-result form to trivial CSE, which is able to combine x/y and x%y 337 // into a single instruction. 338 // 339 // Scalar integer multiply-high is also lowered to use two-result 340 // operations, to match the available instructions. However, plain multiply 341 // (low) operations are left as Legal, as there are single-result 342 // instructions for this in x86. 
Using the two-result multiply instructions 343 // when both high and low results are needed must be arranged by dagcombine. 344 for (unsigned i = 0, e = 4; i != e; ++i) { 345 MVT VT = IntVTs[i]; 346 setOperationAction(ISD::MULHS, VT, Expand); 347 setOperationAction(ISD::MULHU, VT, Expand); 348 setOperationAction(ISD::SDIV, VT, Expand); 349 setOperationAction(ISD::UDIV, VT, Expand); 350 setOperationAction(ISD::SREM, VT, Expand); 351 setOperationAction(ISD::UREM, VT, Expand); 352 353 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 354 setOperationAction(ISD::ADDC, VT, Custom); 355 setOperationAction(ISD::ADDE, VT, Custom); 356 setOperationAction(ISD::SUBC, VT, Custom); 357 setOperationAction(ISD::SUBE, VT, Custom); 358 } 359 360 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 361 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 362 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 363 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 364 if (Subtarget->is64Bit()) 365 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 366 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 367 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 368 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 369 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 370 setOperationAction(ISD::FREM , MVT::f32 , Expand); 371 setOperationAction(ISD::FREM , MVT::f64 , Expand); 372 setOperationAction(ISD::FREM , MVT::f80 , Expand); 373 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 374 375 // Promote the i8 variants and force them on up to i32 which has a shorter 376 // encoding. 377 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 378 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 379 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 380 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 381 if (Subtarget->hasBMI()) { 382 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 383 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 384 if (Subtarget->is64Bit()) 385 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 386 } else { 387 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 388 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 389 if (Subtarget->is64Bit()) 390 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 391 } 392 393 if (Subtarget->hasLZCNT()) { 394 // When promoting the i8 variants, force them to i32 for a shorter 395 // encoding. 
396 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 397 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 398 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 399 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 400 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 401 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 402 if (Subtarget->is64Bit()) 403 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 404 } else { 405 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 406 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 407 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 408 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 409 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 411 if (Subtarget->is64Bit()) { 412 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 413 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 414 } 415 } 416 417 if (Subtarget->hasPOPCNT()) { 418 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 419 } else { 420 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 421 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 422 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 423 if (Subtarget->is64Bit()) 424 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 425 } 426 427 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 428 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 429 430 // These should be promoted to a larger select which is supported. 431 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 432 // X86 wants to expand cmov itself. 433 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 434 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 435 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 436 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 437 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 438 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 439 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 440 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 441 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 442 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 443 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 444 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 445 if (Subtarget->is64Bit()) { 446 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 447 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 448 } 449 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 450 451 // Darwin ABI issue. 
452 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 453 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 454 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 455 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 456 if (Subtarget->is64Bit()) 457 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 458 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 459 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom); 460 if (Subtarget->is64Bit()) { 461 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 462 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 463 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 464 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 465 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom); 466 } 467 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 468 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 469 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 470 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 471 if (Subtarget->is64Bit()) { 472 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom); 473 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom); 474 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom); 475 } 476 477 if (Subtarget->hasSSE1()) 478 setOperationAction(ISD::PREFETCH , MVT::Other, Legal); 479 480 setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); 481 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); 482 483 // On X86 and X86-64, atomic operations are lowered to locked instructions. 484 // Locked instructions, in turn, have implicit fence semantics (all memory 485 // operations are flushed before issuing the locked instruction, and they 486 // are not buffered), so we can fold away the common pattern of 487 // fence-atomic-fence. 
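  // Added sketch (illustrative, not in the original): with fence folding
  // enabled, IR such as
  //   fence seq_cst
  //   %old = atomicrmw add i32* %p, i32 1 seq_cst
  //   fence seq_cst
  // needs no extra barrier instructions, because the "lock"-prefixed RMW
  // that the atomicrmw lowers to already acts as a full fence.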
488 setShouldFoldAtomicFences(true); 489 490 // Expand certain atomics 491 for (unsigned i = 0, e = 4; i != e; ++i) { 492 MVT VT = IntVTs[i]; 493 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 494 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 495 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 496 } 497 498 if (!Subtarget->is64Bit()) { 499 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 500 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 501 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 502 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 503 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 504 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 505 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 506 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 507 } 508 509 if (Subtarget->hasCmpxchg16b()) { 510 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 511 } 512 513 // FIXME - use subtarget debug flags 514 if (!Subtarget->isTargetDarwin() && 515 !Subtarget->isTargetELF() && 516 !Subtarget->isTargetCygMing()) { 517 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 518 } 519 520 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 521 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 522 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 523 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 524 if (Subtarget->is64Bit()) { 525 setExceptionPointerRegister(X86::RAX); 526 setExceptionSelectorRegister(X86::RDX); 527 } else { 528 setExceptionPointerRegister(X86::EAX); 529 setExceptionSelectorRegister(X86::EDX); 530 } 531 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 532 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 533 534 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 535 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 536 537 setOperationAction(ISD::TRAP, MVT::Other, Legal); 538 539 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 540 setOperationAction(ISD::VASTART , MVT::Other, Custom); 541 setOperationAction(ISD::VAEND , MVT::Other, Expand); 542 if (Subtarget->is64Bit()) { 543 setOperationAction(ISD::VAARG , MVT::Other, Custom); 544 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 545 } else { 546 setOperationAction(ISD::VAARG , MVT::Other, Expand); 547 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 548 } 549 550 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 551 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 552 553 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 554 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 555 MVT::i64 : MVT::i32, Custom); 556 else if (TM.Options.EnableSegmentedStacks) 557 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 558 MVT::i64 : MVT::i32, Custom); 559 else 560 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 561 MVT::i64 : MVT::i32, Expand); 562 563 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 564 // f32 and f64 use SSE. 565 // Set up the FP register classes. 566 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 567 addRegisterClass(MVT::f64, X86::FR64RegisterClass); 568 569 // Use ANDPD to simulate FABS. 570 setOperationAction(ISD::FABS , MVT::f64, Custom); 571 setOperationAction(ISD::FABS , MVT::f32, Custom); 572 573 // Use XORP to simulate FNEG. 
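    // (Added note, not in the original: FNEG and FABS are lowered as bitwise
    // operations on the sign bit -- XOR with a sign-bit mask from the
    // constant pool for FNEG, AND with its complement for FABS -- which is
    // why they are Custom rather than Expand here.)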
574 setOperationAction(ISD::FNEG , MVT::f64, Custom); 575 setOperationAction(ISD::FNEG , MVT::f32, Custom); 576 577 // Use ANDPD and ORPD to simulate FCOPYSIGN. 578 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 579 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 580 581 // Lower this to FGETSIGNx86 plus an AND. 582 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 583 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 584 585 // We don't support sin/cos/fmod 586 setOperationAction(ISD::FSIN , MVT::f64, Expand); 587 setOperationAction(ISD::FCOS , MVT::f64, Expand); 588 setOperationAction(ISD::FSIN , MVT::f32, Expand); 589 setOperationAction(ISD::FCOS , MVT::f32, Expand); 590 591 // Expand FP immediates into loads from the stack, except for the special 592 // cases we handle. 593 addLegalFPImmediate(APFloat(+0.0)); // xorpd 594 addLegalFPImmediate(APFloat(+0.0f)); // xorps 595 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 596 // Use SSE for f32, x87 for f64. 597 // Set up the FP register classes. 598 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 599 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 600 601 // Use ANDPS to simulate FABS. 602 setOperationAction(ISD::FABS , MVT::f32, Custom); 603 604 // Use XORP to simulate FNEG. 605 setOperationAction(ISD::FNEG , MVT::f32, Custom); 606 607 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 608 609 // Use ANDPS and ORPS to simulate FCOPYSIGN. 610 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 611 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 612 613 // We don't support sin/cos/fmod 614 setOperationAction(ISD::FSIN , MVT::f32, Expand); 615 setOperationAction(ISD::FCOS , MVT::f32, Expand); 616 617 // Special cases we handle for FP constants. 618 addLegalFPImmediate(APFloat(+0.0f)); // xorps 619 addLegalFPImmediate(APFloat(+0.0)); // FLD0 620 addLegalFPImmediate(APFloat(+1.0)); // FLD1 621 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 622 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 623 624 if (!TM.Options.UnsafeFPMath) { 625 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 626 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 627 } 628 } else if (!TM.Options.UseSoftFloat) { 629 // f32 and f64 in x87. 630 // Set up the FP register classes. 631 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 632 addRegisterClass(MVT::f32, X86::RFP32RegisterClass); 633 634 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 635 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 636 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 637 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 638 639 if (!TM.Options.UnsafeFPMath) { 640 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 641 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 642 } 643 addLegalFPImmediate(APFloat(+0.0)); // FLD0 644 addLegalFPImmediate(APFloat(+1.0)); // FLD1 645 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 646 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 647 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 648 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 649 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 650 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 651 } 652 653 // We don't support FMA. 654 setOperationAction(ISD::FMA, MVT::f64, Expand); 655 setOperationAction(ISD::FMA, MVT::f32, Expand); 656 657 // Long double always uses X87. 
658 if (!TM.Options.UseSoftFloat) { 659 addRegisterClass(MVT::f80, X86::RFP80RegisterClass); 660 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 661 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 662 { 663 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 664 addLegalFPImmediate(TmpFlt); // FLD0 665 TmpFlt.changeSign(); 666 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 667 668 bool ignored; 669 APFloat TmpFlt2(+1.0); 670 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 671 &ignored); 672 addLegalFPImmediate(TmpFlt2); // FLD1 673 TmpFlt2.changeSign(); 674 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 675 } 676 677 if (!TM.Options.UnsafeFPMath) { 678 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 679 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 680 } 681 682 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 683 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 684 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 685 setOperationAction(ISD::FRINT, MVT::f80, Expand); 686 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 687 setOperationAction(ISD::FMA, MVT::f80, Expand); 688 } 689 690 // Always use a library call for pow. 691 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 692 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 693 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 694 695 setOperationAction(ISD::FLOG, MVT::f80, Expand); 696 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 697 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 698 setOperationAction(ISD::FEXP, MVT::f80, Expand); 699 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 700 701 // First set operation action for all vector types to either promote 702 // (for widening) or expand (for scalarization). Then we will selectively 703 // turn on ones that can be effectively codegen'd. 
704 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 705 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 706 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 707 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 708 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 709 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 710 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 711 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 712 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 713 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 714 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 715 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 716 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 717 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 718 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 719 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 720 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 721 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 722 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 723 setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 724 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 725 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 726 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 727 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 728 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 729 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 730 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 731 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 732 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 733 setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 734 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 735 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 736 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 737 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 738 setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 739 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 740 setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 741 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 742 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 743 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 744 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 745 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 746 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 747 setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand); 748 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 749 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 750 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 751 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 752 setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand); 753 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 754 
setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 755 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 756 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 757 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 758 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 759 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 760 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 761 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 762 setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); 763 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 764 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 765 setTruncStoreAction((MVT::SimpleValueType)VT, 766 (MVT::SimpleValueType)InnerVT, Expand); 767 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 768 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 769 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 770 } 771 772 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 773 // with -msoft-float, disable use of MMX as well. 774 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 775 addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass); 776 // No operations on x86mmx supported, everything uses intrinsics. 777 } 778 779 // MMX-sized vectors (other than x86mmx) are expected to be expanded 780 // into smaller operations. 781 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 782 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 783 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 784 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 785 setOperationAction(ISD::AND, MVT::v8i8, Expand); 786 setOperationAction(ISD::AND, MVT::v4i16, Expand); 787 setOperationAction(ISD::AND, MVT::v2i32, Expand); 788 setOperationAction(ISD::AND, MVT::v1i64, Expand); 789 setOperationAction(ISD::OR, MVT::v8i8, Expand); 790 setOperationAction(ISD::OR, MVT::v4i16, Expand); 791 setOperationAction(ISD::OR, MVT::v2i32, Expand); 792 setOperationAction(ISD::OR, MVT::v1i64, Expand); 793 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 794 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 795 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 796 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 797 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 798 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 799 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 800 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 801 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 802 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 803 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 804 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 805 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 806 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 807 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 808 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 809 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 810 811 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 812 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); 813 814 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 815 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 816 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 817 setOperationAction(ISD::FDIV, MVT::v4f32, 
Legal); 818 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 819 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 820 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 821 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 822 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 823 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 824 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 825 setOperationAction(ISD::SETCC, MVT::v4f32, Custom); 826 } 827 828 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 829 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); 830 831 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 832 // registers cannot be used even for integer operations. 833 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); 834 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); 835 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); 836 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); 837 838 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 839 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 840 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 841 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 842 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 843 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 844 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 845 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 846 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 847 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 848 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 849 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 850 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 851 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 852 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 853 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 854 855 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 856 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 857 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 858 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 859 860 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 861 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 862 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 863 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 864 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 865 866 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom); 867 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom); 868 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom); 869 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom); 870 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); 871 872 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
873 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) { 874 EVT VT = (MVT::SimpleValueType)i; 875 // Do not attempt to custom lower non-power-of-2 vectors 876 if (!isPowerOf2_32(VT.getVectorNumElements())) 877 continue; 878 // Do not attempt to custom lower non-128-bit vectors 879 if (!VT.is128BitVector()) 880 continue; 881 setOperationAction(ISD::BUILD_VECTOR, 882 VT.getSimpleVT().SimpleTy, Custom); 883 setOperationAction(ISD::VECTOR_SHUFFLE, 884 VT.getSimpleVT().SimpleTy, Custom); 885 setOperationAction(ISD::EXTRACT_VECTOR_ELT, 886 VT.getSimpleVT().SimpleTy, Custom); 887 } 888 889 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 890 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 891 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 892 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 893 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 894 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 895 896 if (Subtarget->is64Bit()) { 897 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 898 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 899 } 900 901 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 902 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) { 903 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 904 EVT VT = SVT; 905 906 // Do not attempt to promote non-128-bit vectors 907 if (!VT.is128BitVector()) 908 continue; 909 910 setOperationAction(ISD::AND, SVT, Promote); 911 AddPromotedToType (ISD::AND, SVT, MVT::v2i64); 912 setOperationAction(ISD::OR, SVT, Promote); 913 AddPromotedToType (ISD::OR, SVT, MVT::v2i64); 914 setOperationAction(ISD::XOR, SVT, Promote); 915 AddPromotedToType (ISD::XOR, SVT, MVT::v2i64); 916 setOperationAction(ISD::LOAD, SVT, Promote); 917 AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64); 918 setOperationAction(ISD::SELECT, SVT, Promote); 919 AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64); 920 } 921 922 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 923 924 // Custom lower v2i64 and v2f64 selects. 925 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 926 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 927 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 928 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 929 930 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 931 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 932 } 933 934 if (Subtarget->hasSSE41()) { 935 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 936 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 937 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 938 setOperationAction(ISD::FRINT, MVT::f32, Legal); 939 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 940 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 941 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 942 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 943 setOperationAction(ISD::FRINT, MVT::f64, Legal); 944 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 945 946 // FIXME: Do we need to handle scalar-to-vector here? 
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant.  For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::SRL, MVT::v2i64, Legal);
      setOperationAction(ISD::SRL, MVT::v4i32, Legal);

      setOperationAction(ISD::SHL, MVT::v2i64, Legal);
      setOperationAction(ISD::SHL, MVT::v4i32, Legal);

      setOperationAction(ISD::SRA, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::SRL, MVT::v2i64, Custom);
      setOperationAction(ISD::SRL, MVT::v4i32, Custom);

      setOperationAction(ISD::SHL, MVT::v2i64, Custom);
      setOperationAction(ISD::SHL, MVT::v4i32, Custom);

      setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    }
  }

  if (Subtarget->hasSSE42())
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
    addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1030 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1031 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1032 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1033 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1034 1035 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1036 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1037 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1038 1039 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Custom); 1040 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i64, Custom); 1041 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom); 1042 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom); 1043 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8, Custom); 1044 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom); 1045 1046 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1047 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1048 1049 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1050 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1051 1052 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1053 setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1054 1055 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1056 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1057 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1058 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1059 1060 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1061 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1062 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1063 1064 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1065 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1066 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1067 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1068 1069 if (Subtarget->hasAVX2()) { 1070 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1071 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1072 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1073 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1074 1075 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1076 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1077 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1078 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1079 1080 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1081 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1082 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1083 // Don't lower v32i8 because there is no 128-bit byte mul 1084 1085 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1086 1087 setOperationAction(ISD::SRL, MVT::v4i64, Legal); 1088 setOperationAction(ISD::SRL, MVT::v8i32, Legal); 1089 1090 setOperationAction(ISD::SHL, MVT::v4i64, Legal); 1091 setOperationAction(ISD::SHL, MVT::v8i32, Legal); 1092 1093 setOperationAction(ISD::SRA, MVT::v8i32, Legal); 1094 } else { 1095 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1096 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1097 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1098 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1099 1100 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1101 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1102 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1103 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1104 1105 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1106 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1107 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 1108 // 
Don't lower v32i8 because there is no 128-bit byte mul 1109 1110 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1111 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1112 1113 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1114 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1115 1116 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1117 } 1118 1119 // Custom lower several nodes for 256-bit types. 1120 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 1121 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 1122 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 1123 EVT VT = SVT; 1124 1125 // Extract subvector is special because the value type 1126 // (result) is 128-bit but the source is 256-bit wide. 1127 if (VT.is128BitVector()) 1128 setOperationAction(ISD::EXTRACT_SUBVECTOR, SVT, Custom); 1129 1130 // Do not attempt to custom lower other non-256-bit vectors 1131 if (!VT.is256BitVector()) 1132 continue; 1133 1134 setOperationAction(ISD::BUILD_VECTOR, SVT, Custom); 1135 setOperationAction(ISD::VECTOR_SHUFFLE, SVT, Custom); 1136 setOperationAction(ISD::INSERT_VECTOR_ELT, SVT, Custom); 1137 setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom); 1138 setOperationAction(ISD::SCALAR_TO_VECTOR, SVT, Custom); 1139 setOperationAction(ISD::INSERT_SUBVECTOR, SVT, Custom); 1140 } 1141 1142 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1143 for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) { 1144 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 1145 EVT VT = SVT; 1146 1147 // Do not attempt to promote non-256-bit vectors 1148 if (!VT.is256BitVector()) 1149 continue; 1150 1151 setOperationAction(ISD::AND, SVT, Promote); 1152 AddPromotedToType (ISD::AND, SVT, MVT::v4i64); 1153 setOperationAction(ISD::OR, SVT, Promote); 1154 AddPromotedToType (ISD::OR, SVT, MVT::v4i64); 1155 setOperationAction(ISD::XOR, SVT, Promote); 1156 AddPromotedToType (ISD::XOR, SVT, MVT::v4i64); 1157 setOperationAction(ISD::LOAD, SVT, Promote); 1158 AddPromotedToType (ISD::LOAD, SVT, MVT::v4i64); 1159 setOperationAction(ISD::SELECT, SVT, Promote); 1160 AddPromotedToType (ISD::SELECT, SVT, MVT::v4i64); 1161 } 1162 } 1163 1164 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1165 // of this type with custom code. 1166 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 1167 VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) { 1168 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1169 Custom); 1170 } 1171 1172 // We want to custom lower some of our intrinsics. 1173 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1174 1175 1176 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1177 // handle type legalization for these operations here. 1178 // 1179 // FIXME: We really should do custom legalization for addition and 1180 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1181 // than generic legalization for 64-bit multiplication-with-overflow, though. 1182 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1183 // Add/Sub/Mul with overflow operations are custom lowered. 
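    // (Added note, not in the original: custom lowering lets e.g.
    // llvm.sadd.with.overflow be selected as an ADD whose overflow bit is
    // read directly from EFLAGS, rather than being recomputed by the generic
    // legalizer.)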
1184 MVT VT = IntVTs[i]; 1185 setOperationAction(ISD::SADDO, VT, Custom); 1186 setOperationAction(ISD::UADDO, VT, Custom); 1187 setOperationAction(ISD::SSUBO, VT, Custom); 1188 setOperationAction(ISD::USUBO, VT, Custom); 1189 setOperationAction(ISD::SMULO, VT, Custom); 1190 setOperationAction(ISD::UMULO, VT, Custom); 1191 } 1192 1193 // There are no 8-bit 3-address imul/mul instructions 1194 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1195 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1196 1197 if (!Subtarget->is64Bit()) { 1198 // These libcalls are not available in 32-bit. 1199 setLibcallName(RTLIB::SHL_I128, 0); 1200 setLibcallName(RTLIB::SRL_I128, 0); 1201 setLibcallName(RTLIB::SRA_I128, 0); 1202 } 1203 1204 // We have target-specific dag combine patterns for the following nodes: 1205 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1206 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1207 setTargetDAGCombine(ISD::VSELECT); 1208 setTargetDAGCombine(ISD::SELECT); 1209 setTargetDAGCombine(ISD::SHL); 1210 setTargetDAGCombine(ISD::SRA); 1211 setTargetDAGCombine(ISD::SRL); 1212 setTargetDAGCombine(ISD::OR); 1213 setTargetDAGCombine(ISD::AND); 1214 setTargetDAGCombine(ISD::ADD); 1215 setTargetDAGCombine(ISD::FADD); 1216 setTargetDAGCombine(ISD::FSUB); 1217 setTargetDAGCombine(ISD::SUB); 1218 setTargetDAGCombine(ISD::LOAD); 1219 setTargetDAGCombine(ISD::STORE); 1220 setTargetDAGCombine(ISD::ZERO_EXTEND); 1221 setTargetDAGCombine(ISD::SINT_TO_FP); 1222 if (Subtarget->is64Bit()) 1223 setTargetDAGCombine(ISD::MUL); 1224 if (Subtarget->hasBMI()) 1225 setTargetDAGCombine(ISD::XOR); 1226 1227 computeRegisterProperties(); 1228 1229 // On Darwin, -Os means optimize for size without hurting performance, 1230 // do not reduce the limit. 1231 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores 1232 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8; 1233 maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores 1234 maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1235 maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores 1236 maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1237 setPrefLoopAlignment(4); // 2^4 bytes. 1238 benefitFromCodePlacementOpt = true; 1239 1240 setPrefFunctionAlignment(4); // 2^4 bytes. 1241} 1242 1243 1244EVT X86TargetLowering::getSetCCResultType(EVT VT) const { 1245 if (!VT.isVector()) return MVT::i8; 1246 return VT.changeVectorElementTypeToInteger(); 1247} 1248 1249 1250/// getMaxByValAlign - Helper for getByValTypeAlignment to determine 1251/// the desired ByVal argument alignment. 1252static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { 1253 if (MaxAlign == 16) 1254 return; 1255 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1256 if (VTy->getBitWidth() == 128) 1257 MaxAlign = 16; 1258 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1259 unsigned EltAlign = 0; 1260 getMaxByValAlign(ATy->getElementType(), EltAlign); 1261 if (EltAlign > MaxAlign) 1262 MaxAlign = EltAlign; 1263 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 1264 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1265 unsigned EltAlign = 0; 1266 getMaxByValAlign(STy->getElementType(i), EltAlign); 1267 if (EltAlign > MaxAlign) 1268 MaxAlign = EltAlign; 1269 if (MaxAlign == 16) 1270 break; 1271 } 1272 } 1273 return; 1274} 1275 1276/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1277/// function arguments in the caller parameter area. 
For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering.  If DstAlign is zero, the destination's alignment can satisfy
/// any constraint.  Similarly, if SrcAlign is zero, there is no need to
/// check it against an alignment requirement, probably because the source
/// does not need to be loaded.  If 'IsZeroVal' is true, it is safe to
/// return a non-scalar-integer type, e.g. for an empty string source, a
/// constant, or a value loaded from memory.  'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool IsZeroVal,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux.  This is because the stack realignment code can't handle certain
  // cases like PR2962.  This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (IsZeroVal &&
      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->getStackAlignment() >= 32) {
        if (Subtarget->hasAVX2())
          return MVT::v8i32;
        if (Subtarget->hasAVX())
          return MVT::v8f32;
      }
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if the source is a string constant.
      // It's better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
1359 return TargetLowering::getJumpTableEncoding();
1360}
1361
1362const MCExpr *
1363X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1364 const MachineBasicBlock *MBB,
1365 unsigned uid, MCContext &Ctx) const {
1366 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1367 Subtarget->isPICStyleGOT());
1368 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1369 // entries.
1370 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1371 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1372}
1373
1374/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1375/// jumptable.
1376SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1377 SelectionDAG &DAG) const {
1378 if (!Subtarget->is64Bit())
1379 // This doesn't have DebugLoc associated with it, but is not really the
1380 // same as a Register.
1381 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
1382 return Table;
1383}
1384
1385/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1386/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1387/// MCExpr.
1388const MCExpr *X86TargetLowering::
1389getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1390 MCContext &Ctx) const {
1391 // X86-64 uses RIP relative addressing based on the jump table label.
1392 if (Subtarget->isPICStyleRIPRel())
1393 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1394
1395 // Otherwise, the reference is relative to the PIC base.
1396 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1397}
1398
1399// FIXME: Why is this routine here? Move it to RegInfo!
1400std::pair<const TargetRegisterClass*, uint8_t>
1401X86TargetLowering::findRepresentativeClass(EVT VT) const {
1402 const TargetRegisterClass *RRC = 0;
1403 uint8_t Cost = 1;
1404 switch (VT.getSimpleVT().SimpleTy) {
1405 default:
1406 return TargetLowering::findRepresentativeClass(VT);
1407 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1408 RRC = (Subtarget->is64Bit()
1409 ?
X86::GR64RegisterClass : X86::GR32RegisterClass); 1410 break; 1411 case MVT::x86mmx: 1412 RRC = X86::VR64RegisterClass; 1413 break; 1414 case MVT::f32: case MVT::f64: 1415 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1416 case MVT::v4f32: case MVT::v2f64: 1417 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1418 case MVT::v4f64: 1419 RRC = X86::VR128RegisterClass; 1420 break; 1421 } 1422 return std::make_pair(RRC, Cost); 1423} 1424 1425bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1426 unsigned &Offset) const { 1427 if (!Subtarget->isTargetLinux()) 1428 return false; 1429 1430 if (Subtarget->is64Bit()) { 1431 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1432 Offset = 0x28; 1433 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1434 AddressSpace = 256; 1435 else 1436 AddressSpace = 257; 1437 } else { 1438 // %gs:0x14 on i386 1439 Offset = 0x14; 1440 AddressSpace = 256; 1441 } 1442 return true; 1443} 1444 1445 1446//===----------------------------------------------------------------------===// 1447// Return Value Calling Convention Implementation 1448//===----------------------------------------------------------------------===// 1449 1450#include "X86GenCallingConv.inc" 1451 1452bool 1453X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1454 MachineFunction &MF, bool isVarArg, 1455 const SmallVectorImpl<ISD::OutputArg> &Outs, 1456 LLVMContext &Context) const { 1457 SmallVector<CCValAssign, 16> RVLocs; 1458 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1459 RVLocs, Context); 1460 return CCInfo.CheckReturn(Outs, RetCC_X86); 1461} 1462 1463SDValue 1464X86TargetLowering::LowerReturn(SDValue Chain, 1465 CallingConv::ID CallConv, bool isVarArg, 1466 const SmallVectorImpl<ISD::OutputArg> &Outs, 1467 const SmallVectorImpl<SDValue> &OutVals, 1468 DebugLoc dl, SelectionDAG &DAG) const { 1469 MachineFunction &MF = DAG.getMachineFunction(); 1470 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1471 1472 SmallVector<CCValAssign, 16> RVLocs; 1473 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1474 RVLocs, *DAG.getContext()); 1475 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1476 1477 // Add the regs to the liveout set for the function. 1478 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1479 for (unsigned i = 0; i != RVLocs.size(); ++i) 1480 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1481 MRI.addLiveOut(RVLocs[i].getLocReg()); 1482 1483 SDValue Flag; 1484 1485 SmallVector<SDValue, 6> RetOps; 1486 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1487 // Operand #1 = Bytes To Pop 1488 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1489 MVT::i16)); 1490 1491 // Copy the result values into the output registers. 1492 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1493 CCValAssign &VA = RVLocs[i]; 1494 assert(VA.isRegLoc() && "Can only return in registers!"); 1495 SDValue ValToCopy = OutVals[i]; 1496 EVT ValVT = ValToCopy.getValueType(); 1497 1498 // If this is x86-64, and we disabled SSE, we can't return FP values, 1499 // or SSE or MMX vectors. 1500 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1501 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1502 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1503 report_fatal_error("SSE register return with SSE disabled"); 1504 } 1505 // Likewise we can't return F64 values with SSE1 only. 
gcc does so, but 1506 // llvm-gcc has never done it right and no one has noticed, so this 1507 // should be OK for now. 1508 if (ValVT == MVT::f64 && 1509 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1510 report_fatal_error("SSE2 register return with SSE2 disabled"); 1511 1512 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1513 // the RET instruction and handled by the FP Stackifier. 1514 if (VA.getLocReg() == X86::ST0 || 1515 VA.getLocReg() == X86::ST1) { 1516 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1517 // change the value to the FP stack register class. 1518 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1519 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1520 RetOps.push_back(ValToCopy); 1521 // Don't emit a copytoreg. 1522 continue; 1523 } 1524 1525 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1526 // which is returned in RAX / RDX. 1527 if (Subtarget->is64Bit()) { 1528 if (ValVT == MVT::x86mmx) { 1529 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1530 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1531 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1532 ValToCopy); 1533 // If we don't have SSE2 available, convert to v4f32 so the generated 1534 // register is legal. 1535 if (!Subtarget->hasSSE2()) 1536 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1537 } 1538 } 1539 } 1540 1541 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1542 Flag = Chain.getValue(1); 1543 } 1544 1545 // The x86-64 ABI for returning structs by value requires that we copy 1546 // the sret argument into %rax for the return. We saved the argument into 1547 // a virtual register in the entry block, so now we copy the value out 1548 // and into %rax. 1549 if (Subtarget->is64Bit() && 1550 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1551 MachineFunction &MF = DAG.getMachineFunction(); 1552 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1553 unsigned Reg = FuncInfo->getSRetReturnReg(); 1554 assert(Reg && 1555 "SRetReturnReg should have been set in LowerFormalArguments()."); 1556 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1557 1558 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1559 Flag = Chain.getValue(1); 1560 1561 // RAX now acts like a return value. 1562 MRI.addLiveOut(X86::RAX); 1563 } 1564 1565 RetOps[0] = Chain; // Update chain. 1566 1567 // Add the flag if we have it. 1568 if (Flag.getNode()) 1569 RetOps.push_back(Flag); 1570 1571 return DAG.getNode(X86ISD::RET_FLAG, dl, 1572 MVT::Other, &RetOps[0], RetOps.size()); 1573} 1574 1575bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const { 1576 if (N->getNumValues() != 1) 1577 return false; 1578 if (!N->hasNUsesOfValue(1, 0)) 1579 return false; 1580 1581 SDNode *Copy = *N->use_begin(); 1582 if (Copy->getOpcode() != ISD::CopyToReg && 1583 Copy->getOpcode() != ISD::FP_EXTEND) 1584 return false; 1585 1586 bool HasRet = false; 1587 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1588 UI != UE; ++UI) { 1589 if (UI->getOpcode() != X86ISD::RET_FLAG) 1590 return false; 1591 HasRet = true; 1592 } 1593 1594 return HasRet; 1595} 1596 1597EVT 1598X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1599 ISD::NodeType ExtendKind) const { 1600 MVT ReturnMVT; 1601 // TODO: Is this also valid on 32-bit? 
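// For illustration: a zeroext i1 return value is widened only to i8 on
// x86-64; any other extended return is widened to at least i32, the register
// type computed below.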
1602 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
1603 ReturnMVT = MVT::i8;
1604 else
1605 ReturnMVT = MVT::i32;
1606
1607 EVT MinVT = getRegisterType(Context, ReturnMVT);
1608 return VT.bitsLT(MinVT) ? MinVT : VT;
1609}
1610
1611/// LowerCallResult - Lower the result values of a call into the
1612/// appropriate copies out of the corresponding physical registers.
1613///
1614SDValue
1615X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1616 CallingConv::ID CallConv, bool isVarArg,
1617 const SmallVectorImpl<ISD::InputArg> &Ins,
1618 DebugLoc dl, SelectionDAG &DAG,
1619 SmallVectorImpl<SDValue> &InVals) const {
1620
1621 // Assign locations to each value returned by this call.
1622 SmallVector<CCValAssign, 16> RVLocs;
1623 bool Is64Bit = Subtarget->is64Bit();
1624 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1625 getTargetMachine(), RVLocs, *DAG.getContext());
1626 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
1627
1628 // Copy all of the result registers out of their specified physreg.
1629 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1630 CCValAssign &VA = RVLocs[i];
1631 EVT CopyVT = VA.getValVT();
1632
1633 // If this is x86-64, and we disabled SSE, we can't return FP values.
1634 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
1635 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
1636 report_fatal_error("SSE register return with SSE disabled");
1637 }
1638
1639 SDValue Val;
1640
1641 // If this is a call to a function that returns an fp value on the floating
1642 // point stack, we must guarantee that the value is popped from the stack, so
1643 // a CopyFromReg is not good enough - the copy instruction may be eliminated
1644 // if the return value is not used. We use the FpPOP_RETVAL instruction
1645 // instead.
1646 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
1647 // If we prefer to use the value in xmm registers, copy it out as f80 and
1648 // use a truncate to move it from fp stack reg to xmm reg.
1649 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1650 SDValue Ops[] = { Chain, InFlag };
1651 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1652 MVT::Other, MVT::Glue, Ops, 2), 1);
1653 Val = Chain.getValue(0);
1654
1655 // Round the f80 to the right size, which also moves it to the appropriate
1656 // xmm register.
1657 if (CopyVT != VA.getValVT())
1658 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1659 // This truncation won't change the value.
1660 DAG.getIntPtrConstant(1));
1661 } else {
1662 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1663 CopyVT, InFlag).getValue(1);
1664 Val = Chain.getValue(0);
1665 }
1666 InFlag = Chain.getValue(2);
1667 InVals.push_back(Val);
1668 }
1669
1670 return Chain;
1671}
1672
1673
1674//===----------------------------------------------------------------------===//
1675// C & StdCall & Fast Calling Convention implementation
1676//===----------------------------------------------------------------------===//
1677// The StdCall calling convention is the standard for many Windows API
1678// routines. It differs from the C calling convention just a little: the
1679// callee should clean up the stack, not the caller, and symbols should also
1680// be decorated in some fancy way :) It doesn't support any vector arguments.
1681// For info on the fast calling convention see Fast Calling Convention (tail call)
1682// implementation LowerX86_32FastCCCallTo.
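// For example, a Win32 routine declared as 'int __stdcall Foo(int a, int b)'
// is typically emitted under the decorated name _Foo@8 and returns with
// 'ret 8', popping its own eight bytes of arguments; a cdecl callee leaves
// that cleanup to the caller.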
1683 1684/// CallIsStructReturn - Determines whether a call uses struct return 1685/// semantics. 1686static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1687 if (Outs.empty()) 1688 return false; 1689 1690 return Outs[0].Flags.isSRet(); 1691} 1692 1693/// ArgsAreStructReturn - Determines whether a function uses struct 1694/// return semantics. 1695static bool 1696ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1697 if (Ins.empty()) 1698 return false; 1699 1700 return Ins[0].Flags.isSRet(); 1701} 1702 1703/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1704/// by "Src" to address "Dst" with size and alignment information specified by 1705/// the specific parameter attribute. The copy will be passed as a byval 1706/// function parameter. 1707static SDValue 1708CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1709 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1710 DebugLoc dl) { 1711 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1712 1713 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1714 /*isVolatile*/false, /*AlwaysInline=*/true, 1715 MachinePointerInfo(), MachinePointerInfo()); 1716} 1717 1718/// IsTailCallConvention - Return true if the calling convention is one that 1719/// supports tail call optimization. 1720static bool IsTailCallConvention(CallingConv::ID CC) { 1721 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1722} 1723 1724bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1725 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 1726 return false; 1727 1728 CallSite CS(CI); 1729 CallingConv::ID CalleeCC = CS.getCallingConv(); 1730 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1731 return false; 1732 1733 return true; 1734} 1735 1736/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1737/// a tailcall target by changing its ABI. 1738static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1739 bool GuaranteedTailCallOpt) { 1740 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1741} 1742 1743SDValue 1744X86TargetLowering::LowerMemArgument(SDValue Chain, 1745 CallingConv::ID CallConv, 1746 const SmallVectorImpl<ISD::InputArg> &Ins, 1747 DebugLoc dl, SelectionDAG &DAG, 1748 const CCValAssign &VA, 1749 MachineFrameInfo *MFI, 1750 unsigned i) const { 1751 // Create the nodes corresponding to a load from this parameter slot. 1752 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1753 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1754 getTargetMachine().Options.GuaranteedTailCallOpt); 1755 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1756 EVT ValVT; 1757 1758 // If value is passed by pointer we have address passed instead of the value 1759 // itself. 1760 if (VA.getLocInfo() == CCValAssign::Indirect) 1761 ValVT = VA.getLocVT(); 1762 else 1763 ValVT = VA.getValVT(); 1764 1765 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1766 // changed with more analysis. 1767 // In case of tail call optimization mark all arguments mutable. Since they 1768 // could be overwritten by lowering of arguments in case of a tail call. 1769 if (Flags.isByVal()) { 1770 unsigned Bytes = Flags.getByValSize(); 1771 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
1772 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1773 return DAG.getFrameIndex(FI, getPointerTy()); 1774 } else { 1775 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1776 VA.getLocMemOffset(), isImmutable); 1777 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1778 return DAG.getLoad(ValVT, dl, Chain, FIN, 1779 MachinePointerInfo::getFixedStack(FI), 1780 false, false, false, 0); 1781 } 1782} 1783 1784SDValue 1785X86TargetLowering::LowerFormalArguments(SDValue Chain, 1786 CallingConv::ID CallConv, 1787 bool isVarArg, 1788 const SmallVectorImpl<ISD::InputArg> &Ins, 1789 DebugLoc dl, 1790 SelectionDAG &DAG, 1791 SmallVectorImpl<SDValue> &InVals) 1792 const { 1793 MachineFunction &MF = DAG.getMachineFunction(); 1794 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1795 1796 const Function* Fn = MF.getFunction(); 1797 if (Fn->hasExternalLinkage() && 1798 Subtarget->isTargetCygMing() && 1799 Fn->getName() == "main") 1800 FuncInfo->setForceFramePointer(true); 1801 1802 MachineFrameInfo *MFI = MF.getFrameInfo(); 1803 bool Is64Bit = Subtarget->is64Bit(); 1804 bool IsWindows = Subtarget->isTargetWindows(); 1805 bool IsWin64 = Subtarget->isTargetWin64(); 1806 1807 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1808 "Var args not supported with calling convention fastcc or ghc"); 1809 1810 // Assign locations to all of the incoming arguments. 1811 SmallVector<CCValAssign, 16> ArgLocs; 1812 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1813 ArgLocs, *DAG.getContext()); 1814 1815 // Allocate shadow area for Win64 1816 if (IsWin64) { 1817 CCInfo.AllocateStack(32, 8); 1818 } 1819 1820 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1821 1822 unsigned LastVal = ~0U; 1823 SDValue ArgValue; 1824 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1825 CCValAssign &VA = ArgLocs[i]; 1826 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1827 // places. 1828 assert(VA.getValNo() != LastVal && 1829 "Don't support value assigned to multiple locs yet"); 1830 (void)LastVal; 1831 LastVal = VA.getValNo(); 1832 1833 if (VA.isRegLoc()) { 1834 EVT RegVT = VA.getLocVT(); 1835 TargetRegisterClass *RC = NULL; 1836 if (RegVT == MVT::i32) 1837 RC = X86::GR32RegisterClass; 1838 else if (Is64Bit && RegVT == MVT::i64) 1839 RC = X86::GR64RegisterClass; 1840 else if (RegVT == MVT::f32) 1841 RC = X86::FR32RegisterClass; 1842 else if (RegVT == MVT::f64) 1843 RC = X86::FR64RegisterClass; 1844 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1845 RC = X86::VR256RegisterClass; 1846 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1847 RC = X86::VR128RegisterClass; 1848 else if (RegVT == MVT::x86mmx) 1849 RC = X86::VR64RegisterClass; 1850 else 1851 llvm_unreachable("Unknown argument type!"); 1852 1853 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1854 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1855 1856 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1857 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1858 // right size. 
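// E.g. a signext i16 argument arrives in the low bits of a 32-bit register;
// the AssertSext emitted below records that the upper bits are sign bits, and
// the truncate then recovers the original i16 value.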
1859 if (VA.getLocInfo() == CCValAssign::SExt) 1860 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1861 DAG.getValueType(VA.getValVT())); 1862 else if (VA.getLocInfo() == CCValAssign::ZExt) 1863 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1864 DAG.getValueType(VA.getValVT())); 1865 else if (VA.getLocInfo() == CCValAssign::BCvt) 1866 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1867 1868 if (VA.isExtInLoc()) { 1869 // Handle MMX values passed in XMM regs. 1870 if (RegVT.isVector()) { 1871 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1872 ArgValue); 1873 } else 1874 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1875 } 1876 } else { 1877 assert(VA.isMemLoc()); 1878 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1879 } 1880 1881 // If value is passed via pointer - do a load. 1882 if (VA.getLocInfo() == CCValAssign::Indirect) 1883 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1884 MachinePointerInfo(), false, false, false, 0); 1885 1886 InVals.push_back(ArgValue); 1887 } 1888 1889 // The x86-64 ABI for returning structs by value requires that we copy 1890 // the sret argument into %rax for the return. Save the argument into 1891 // a virtual register so that we can access it from the return points. 1892 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1893 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1894 unsigned Reg = FuncInfo->getSRetReturnReg(); 1895 if (!Reg) { 1896 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1897 FuncInfo->setSRetReturnReg(Reg); 1898 } 1899 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1900 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1901 } 1902 1903 unsigned StackSize = CCInfo.getNextStackOffset(); 1904 // Align stack specially for tail calls. 1905 if (FuncIsMadeTailCallSafe(CallConv, 1906 MF.getTarget().Options.GuaranteedTailCallOpt)) 1907 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1908 1909 // If the function takes variable number of arguments, make a frame index for 1910 // the start of the first vararg value... for expansion of llvm.va_start. 1911 if (isVarArg) { 1912 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1913 CallConv != CallingConv::X86_ThisCall)) { 1914 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1915 } 1916 if (Is64Bit) { 1917 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1918 1919 // FIXME: We should really autogenerate these arrays 1920 static const unsigned GPR64ArgRegsWin64[] = { 1921 X86::RCX, X86::RDX, X86::R8, X86::R9 1922 }; 1923 static const unsigned GPR64ArgRegs64Bit[] = { 1924 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1925 }; 1926 static const unsigned XMMArgRegs64Bit[] = { 1927 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1928 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1929 }; 1930 const unsigned *GPR64ArgRegs; 1931 unsigned NumXMMRegs = 0; 1932 1933 if (IsWin64) { 1934 // The XMM registers which might contain var arg parameters are shadowed 1935 // in their paired GPR. So we only need to save the GPR to their home 1936 // slots. 
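// (Win64 pairing: RCX/XMM0, RDX/XMM1, R8/XMM2, R9/XMM3, each with an 8-byte
// home slot in the caller-allocated 32-byte shadow area.)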
1937 TotalNumIntRegs = 4; 1938 GPR64ArgRegs = GPR64ArgRegsWin64; 1939 } else { 1940 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1941 GPR64ArgRegs = GPR64ArgRegs64Bit; 1942 1943 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 1944 TotalNumXMMRegs); 1945 } 1946 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 1947 TotalNumIntRegs); 1948 1949 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 1950 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 1951 "SSE register cannot be used when SSE is disabled!"); 1952 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 1953 NoImplicitFloatOps) && 1954 "SSE register cannot be used when SSE is disabled!"); 1955 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 1956 !Subtarget->hasSSE1()) 1957 // Kernel mode asks for SSE to be disabled, so don't push them 1958 // on the stack. 1959 TotalNumXMMRegs = 0; 1960 1961 if (IsWin64) { 1962 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 1963 // Get to the caller-allocated home save location. Add 8 to account 1964 // for the return address. 1965 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 1966 FuncInfo->setRegSaveFrameIndex( 1967 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 1968 // Fixup to set vararg frame on shadow area (4 x i64). 1969 if (NumIntRegs < 4) 1970 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 1971 } else { 1972 // For X86-64, if there are vararg parameters that are passed via 1973 // registers, then we must store them to their spots on the stack so 1974 // they may be loaded by deferencing the result of va_next. 1975 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 1976 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 1977 FuncInfo->setRegSaveFrameIndex( 1978 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 1979 false)); 1980 } 1981 1982 // Store the integer parameter registers. 1983 SmallVector<SDValue, 8> MemOps; 1984 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 1985 getPointerTy()); 1986 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 1987 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 1988 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 1989 DAG.getIntPtrConstant(Offset)); 1990 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 1991 X86::GR64RegisterClass); 1992 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1993 SDValue Store = 1994 DAG.getStore(Val.getValue(1), dl, Val, FIN, 1995 MachinePointerInfo::getFixedStack( 1996 FuncInfo->getRegSaveFrameIndex(), Offset), 1997 false, false, 0); 1998 MemOps.push_back(Store); 1999 Offset += 8; 2000 } 2001 2002 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2003 // Now store the XMM (fp + vector) parameter registers. 
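// Operands for the VASTART_SAVE_XMM_REGS pseudo built below: the chain, the
// live-in copy of %al (the caller-supplied count of vector registers used),
// the register-save frame index, the FP-area offset, and the XMM values to
// spill.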
2004 SmallVector<SDValue, 11> SaveXMMOps; 2005 SaveXMMOps.push_back(Chain); 2006 2007 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 2008 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2009 SaveXMMOps.push_back(ALVal); 2010 2011 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2012 FuncInfo->getRegSaveFrameIndex())); 2013 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2014 FuncInfo->getVarArgsFPOffset())); 2015 2016 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2017 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2018 X86::VR128RegisterClass); 2019 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2020 SaveXMMOps.push_back(Val); 2021 } 2022 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2023 MVT::Other, 2024 &SaveXMMOps[0], SaveXMMOps.size())); 2025 } 2026 2027 if (!MemOps.empty()) 2028 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2029 &MemOps[0], MemOps.size()); 2030 } 2031 } 2032 2033 // Some CCs need callee pop. 2034 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2035 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2036 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2037 } else { 2038 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2039 // If this is an sret function, the return should pop the hidden pointer. 2040 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2041 ArgsAreStructReturn(Ins)) 2042 FuncInfo->setBytesToPopOnReturn(4); 2043 } 2044 2045 if (!Is64Bit) { 2046 // RegSaveFrameIndex is X86-64 only. 2047 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2048 if (CallConv == CallingConv::X86_FastCall || 2049 CallConv == CallingConv::X86_ThisCall) 2050 // fastcc functions can't have varargs. 2051 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2052 } 2053 2054 FuncInfo->setArgumentStackSize(StackSize); 2055 2056 return Chain; 2057} 2058 2059SDValue 2060X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2061 SDValue StackPtr, SDValue Arg, 2062 DebugLoc dl, SelectionDAG &DAG, 2063 const CCValAssign &VA, 2064 ISD::ArgFlagsTy Flags) const { 2065 unsigned LocMemOffset = VA.getLocMemOffset(); 2066 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2067 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2068 if (Flags.isByVal()) 2069 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2070 2071 return DAG.getStore(Chain, dl, Arg, PtrOff, 2072 MachinePointerInfo::getStack(LocMemOffset), 2073 false, false, 0); 2074} 2075 2076/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2077/// optimization is performed and it is required. 2078SDValue 2079X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2080 SDValue &OutRetAddr, SDValue Chain, 2081 bool IsTailCall, bool Is64Bit, 2082 int FPDiff, DebugLoc dl) const { 2083 // Adjust the Return address stack slot. 2084 EVT VT = getPointerTy(); 2085 OutRetAddr = getReturnAddressFrameIndex(DAG); 2086 2087 // Load the "old" Return address. 2088 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2089 false, false, false, 0); 2090 return SDValue(OutRetAddr.getNode(), 1); 2091} 2092 2093/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2094/// optimization is performed and it is required (FPDiff!=0). 
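/// The return address is re-stored into a new fixed object at offset
/// FPDiff - SlotSize, i.e. shifted by FPDiff bytes from its original slot at
/// -SlotSize.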
2095static SDValue 2096EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2097 SDValue Chain, SDValue RetAddrFrIdx, 2098 bool Is64Bit, int FPDiff, DebugLoc dl) { 2099 // Store the return address to the appropriate stack slot. 2100 if (!FPDiff) return Chain; 2101 // Calculate the new stack slot for the return address. 2102 int SlotSize = Is64Bit ? 8 : 4; 2103 int NewReturnAddrFI = 2104 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2105 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2106 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 2107 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2108 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2109 false, false, 0); 2110 return Chain; 2111} 2112 2113SDValue 2114X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, 2115 CallingConv::ID CallConv, bool isVarArg, 2116 bool &isTailCall, 2117 const SmallVectorImpl<ISD::OutputArg> &Outs, 2118 const SmallVectorImpl<SDValue> &OutVals, 2119 const SmallVectorImpl<ISD::InputArg> &Ins, 2120 DebugLoc dl, SelectionDAG &DAG, 2121 SmallVectorImpl<SDValue> &InVals) const { 2122 MachineFunction &MF = DAG.getMachineFunction(); 2123 bool Is64Bit = Subtarget->is64Bit(); 2124 bool IsWin64 = Subtarget->isTargetWin64(); 2125 bool IsWindows = Subtarget->isTargetWindows(); 2126 bool IsStructRet = CallIsStructReturn(Outs); 2127 bool IsSibcall = false; 2128 2129 if (MF.getTarget().Options.DisableTailCalls) 2130 isTailCall = false; 2131 2132 if (isTailCall) { 2133 // Check if it's really possible to do a tail call. 2134 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2135 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 2136 Outs, OutVals, Ins, DAG); 2137 2138 // Sibcalls are automatically detected tailcalls which do not require 2139 // ABI changes. 2140 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2141 IsSibcall = true; 2142 2143 if (isTailCall) 2144 ++NumTailCalls; 2145 } 2146 2147 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2148 "Var args not supported with calling convention fastcc or ghc"); 2149 2150 // Analyze operands of the call, assigning locations to each operand. 2151 SmallVector<CCValAssign, 16> ArgLocs; 2152 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2153 ArgLocs, *DAG.getContext()); 2154 2155 // Allocate shadow area for Win64 2156 if (IsWin64) { 2157 CCInfo.AllocateStack(32, 8); 2158 } 2159 2160 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2161 2162 // Get a count of how many bytes are to be pushed on the stack. 2163 unsigned NumBytes = CCInfo.getNextStackOffset(); 2164 if (IsSibcall) 2165 // This is a sibcall. The memory operands are available in caller's 2166 // own caller's stack. 2167 NumBytes = 0; 2168 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2169 IsTailCallConvention(CallConv)) 2170 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2171 2172 int FPDiff = 0; 2173 if (isTailCall && !IsSibcall) { 2174 // Lower arguments at fp - stackoffset + fpdiff. 2175 unsigned NumBytesCallerPushed = 2176 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 2177 FPDiff = NumBytesCallerPushed - NumBytes; 2178 2179 // Set the delta of movement of the returnaddr stackslot. 2180 // But only set if delta is greater than previous delta. 
2181 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 2182 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 2183 } 2184 2185 if (!IsSibcall) 2186 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2187 2188 SDValue RetAddrFrIdx; 2189 // Load return address for tail calls. 2190 if (isTailCall && FPDiff) 2191 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2192 Is64Bit, FPDiff, dl); 2193 2194 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2195 SmallVector<SDValue, 8> MemOpChains; 2196 SDValue StackPtr; 2197 2198 // Walk the register/memloc assignments, inserting copies/loads. In the case 2199 // of tail call optimization arguments are handle later. 2200 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2201 CCValAssign &VA = ArgLocs[i]; 2202 EVT RegVT = VA.getLocVT(); 2203 SDValue Arg = OutVals[i]; 2204 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2205 bool isByVal = Flags.isByVal(); 2206 2207 // Promote the value if needed. 2208 switch (VA.getLocInfo()) { 2209 default: llvm_unreachable("Unknown loc info!"); 2210 case CCValAssign::Full: break; 2211 case CCValAssign::SExt: 2212 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2213 break; 2214 case CCValAssign::ZExt: 2215 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2216 break; 2217 case CCValAssign::AExt: 2218 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { 2219 // Special case: passing MMX values in XMM registers. 2220 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2221 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2222 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2223 } else 2224 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2225 break; 2226 case CCValAssign::BCvt: 2227 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2228 break; 2229 case CCValAssign::Indirect: { 2230 // Store the argument. 2231 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2232 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2233 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2234 MachinePointerInfo::getFixedStack(FI), 2235 false, false, 0); 2236 Arg = SpillSlot; 2237 break; 2238 } 2239 } 2240 2241 if (VA.isRegLoc()) { 2242 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2243 if (isVarArg && IsWin64) { 2244 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2245 // shadow reg if callee is a varargs function. 2246 unsigned ShadowReg = 0; 2247 switch (VA.getLocReg()) { 2248 case X86::XMM0: ShadowReg = X86::RCX; break; 2249 case X86::XMM1: ShadowReg = X86::RDX; break; 2250 case X86::XMM2: ShadowReg = X86::R8; break; 2251 case X86::XMM3: ShadowReg = X86::R9; break; 2252 } 2253 if (ShadowReg) 2254 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2255 } 2256 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2257 assert(VA.isMemLoc()); 2258 if (StackPtr.getNode() == 0) 2259 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 2260 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2261 dl, DAG, VA, Flags)); 2262 } 2263 } 2264 2265 if (!MemOpChains.empty()) 2266 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2267 &MemOpChains[0], MemOpChains.size()); 2268 2269 // Build a sequence of copy-to-reg nodes chained together with token chain 2270 // and flag operands which copy the outgoing args into registers. 
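// The glue value produced by each CopyToReg below is threaded into the next
// copy and ultimately into the call node, so the copies stay adjacent to the
// call during scheduling.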
2271 SDValue InFlag; 2272 // Tail call byval lowering might overwrite argument registers so in case of 2273 // tail call optimization the copies to registers are lowered later. 2274 if (!isTailCall) 2275 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2276 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2277 RegsToPass[i].second, InFlag); 2278 InFlag = Chain.getValue(1); 2279 } 2280 2281 if (Subtarget->isPICStyleGOT()) { 2282 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2283 // GOT pointer. 2284 if (!isTailCall) { 2285 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, 2286 DAG.getNode(X86ISD::GlobalBaseReg, 2287 DebugLoc(), getPointerTy()), 2288 InFlag); 2289 InFlag = Chain.getValue(1); 2290 } else { 2291 // If we are tail calling and generating PIC/GOT style code load the 2292 // address of the callee into ECX. The value in ecx is used as target of 2293 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2294 // for tail calls on PIC/GOT architectures. Normally we would just put the 2295 // address of GOT into ebx and then call target@PLT. But for tail calls 2296 // ebx would be restored (since ebx is callee saved) before jumping to the 2297 // target@PLT. 2298 2299 // Note: The actual moving to ECX is done further down. 2300 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2301 if (G && !G->getGlobal()->hasHiddenVisibility() && 2302 !G->getGlobal()->hasProtectedVisibility()) 2303 Callee = LowerGlobalAddress(Callee, DAG); 2304 else if (isa<ExternalSymbolSDNode>(Callee)) 2305 Callee = LowerExternalSymbol(Callee, DAG); 2306 } 2307 } 2308 2309 if (Is64Bit && isVarArg && !IsWin64) { 2310 // From AMD64 ABI document: 2311 // For calls that may call functions that use varargs or stdargs 2312 // (prototype-less calls or calls to functions containing ellipsis (...) in 2313 // the declaration) %al is used as hidden argument to specify the number 2314 // of SSE registers used. The contents of %al do not need to match exactly 2315 // the number of registers, but must be an ubound on the number of SSE 2316 // registers used and is in the range 0 - 8 inclusive. 2317 2318 // Count the number of XMM registers allocated. 2319 static const unsigned XMMArgRegs[] = { 2320 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2321 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2322 }; 2323 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2324 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2325 && "SSE registers cannot be used when SSE is disabled"); 2326 2327 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, 2328 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 2329 InFlag = Chain.getValue(1); 2330 } 2331 2332 2333 // For tail calls lower the arguments to the 'real' stack slot. 2334 if (isTailCall) { 2335 // Force all the incoming stack arguments to be loaded from the stack 2336 // before any new outgoing arguments are stored to the stack, because the 2337 // outgoing stack slots may alias the incoming argument stack slots, and 2338 // the alias isn't otherwise explicit. This is slightly more conservative 2339 // than necessary, because it means that each store effectively depends 2340 // on every argument instead of just those arguments it would clobber. 2341 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2342 2343 SmallVector<SDValue, 8> MemOpChains2; 2344 SDValue FIN; 2345 int FI = 0; 2346 // Do not flag preceding copytoreg stuff together with the following stuff. 
2347 InFlag = SDValue(); 2348 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2349 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2350 CCValAssign &VA = ArgLocs[i]; 2351 if (VA.isRegLoc()) 2352 continue; 2353 assert(VA.isMemLoc()); 2354 SDValue Arg = OutVals[i]; 2355 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2356 // Create frame index. 2357 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2358 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2359 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2360 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2361 2362 if (Flags.isByVal()) { 2363 // Copy relative to framepointer. 2364 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2365 if (StackPtr.getNode() == 0) 2366 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 2367 getPointerTy()); 2368 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2369 2370 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2371 ArgChain, 2372 Flags, DAG, dl)); 2373 } else { 2374 // Store relative to framepointer. 2375 MemOpChains2.push_back( 2376 DAG.getStore(ArgChain, dl, Arg, FIN, 2377 MachinePointerInfo::getFixedStack(FI), 2378 false, false, 0)); 2379 } 2380 } 2381 } 2382 2383 if (!MemOpChains2.empty()) 2384 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2385 &MemOpChains2[0], MemOpChains2.size()); 2386 2387 // Copy arguments to their registers. 2388 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2389 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2390 RegsToPass[i].second, InFlag); 2391 InFlag = Chain.getValue(1); 2392 } 2393 InFlag =SDValue(); 2394 2395 // Store the return address to the appropriate stack slot. 2396 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, 2397 FPDiff, dl); 2398 } 2399 2400 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2401 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2402 // In the 64-bit large code model, we have to make all calls 2403 // through a register, since the call instruction's 32-bit 2404 // pc-relative offset may not be large enough to hold the whole 2405 // address. 2406 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2407 // If the callee is a GlobalAddress node (quite common, every direct call 2408 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2409 // it. 2410 2411 // We should use extra load for direct calls to dllimported functions in 2412 // non-JIT mode. 2413 const GlobalValue *GV = G->getGlobal(); 2414 if (!GV->hasDLLImportLinkage()) { 2415 unsigned char OpFlags = 0; 2416 bool ExtraLoad = false; 2417 unsigned WrapperKind = ISD::DELETED_NODE; 2418 2419 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2420 // external symbols most go through the PLT in PIC mode. If the symbol 2421 // has hidden or protected visibility, or if it is static or local, then 2422 // we don't need to use the PLT - we can directly call it. 
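// Roughly: a 32-bit ELF PIC call to an undefined 'foo' becomes 'call foo@PLT',
// while the MO_GOTPCREL case below becomes an indirect
// 'call *foo@GOTPCREL(%rip)' that loads the callee's address from the GOT.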
2423 if (Subtarget->isTargetELF() && 2424 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2425 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2426 OpFlags = X86II::MO_PLT; 2427 } else if (Subtarget->isPICStyleStubAny() && 2428 (GV->isDeclaration() || GV->isWeakForLinker()) && 2429 (!Subtarget->getTargetTriple().isMacOSX() || 2430 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2431 // PC-relative references to external symbols should go through $stub, 2432 // unless we're building with the leopard linker or later, which 2433 // automatically synthesizes these stubs. 2434 OpFlags = X86II::MO_DARWIN_STUB; 2435 } else if (Subtarget->isPICStyleRIPRel() && 2436 isa<Function>(GV) && 2437 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) { 2438 // If the function is marked as non-lazy, generate an indirect call 2439 // which loads from the GOT directly. This avoids runtime overhead 2440 // at the cost of eager binding (and one extra byte of encoding). 2441 OpFlags = X86II::MO_GOTPCREL; 2442 WrapperKind = X86ISD::WrapperRIP; 2443 ExtraLoad = true; 2444 } 2445 2446 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2447 G->getOffset(), OpFlags); 2448 2449 // Add a wrapper if needed. 2450 if (WrapperKind != ISD::DELETED_NODE) 2451 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2452 // Add extra indirection if needed. 2453 if (ExtraLoad) 2454 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2455 MachinePointerInfo::getGOT(), 2456 false, false, false, 0); 2457 } 2458 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2459 unsigned char OpFlags = 0; 2460 2461 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2462 // external symbols should go through the PLT. 2463 if (Subtarget->isTargetELF() && 2464 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2465 OpFlags = X86II::MO_PLT; 2466 } else if (Subtarget->isPICStyleStubAny() && 2467 (!Subtarget->getTargetTriple().isMacOSX() || 2468 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2469 // PC-relative references to external symbols should go through $stub, 2470 // unless we're building with the leopard linker or later, which 2471 // automatically synthesizes these stubs. 2472 OpFlags = X86II::MO_DARWIN_STUB; 2473 } 2474 2475 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2476 OpFlags); 2477 } 2478 2479 // Returns a chain & a flag for retval copy to use. 2480 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2481 SmallVector<SDValue, 8> Ops; 2482 2483 if (!IsSibcall && isTailCall) { 2484 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2485 DAG.getIntPtrConstant(0, true), InFlag); 2486 InFlag = Chain.getValue(1); 2487 } 2488 2489 Ops.push_back(Chain); 2490 Ops.push_back(Callee); 2491 2492 if (isTailCall) 2493 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2494 2495 // Add argument registers to the end of the list so that they are known live 2496 // into the call. 2497 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2498 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2499 RegsToPass[i].second.getValueType())); 2500 2501 // Add an implicit use GOT pointer in EBX. 2502 if (!isTailCall && Subtarget->isPICStyleGOT()) 2503 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2504 2505 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. 
2506 if (Is64Bit && isVarArg && !IsWin64)
2507 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
2508
2509 // Experimental: Add a register mask operand representing the call-preserved
2510 // registers.
2511 if (UseRegMask) {
2512 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2513 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
2514 Ops.push_back(DAG.getRegisterMask(Mask));
2515 }
2516
2517 if (InFlag.getNode())
2518 Ops.push_back(InFlag);
2519
2520 if (isTailCall) {
2521 // We used to do:
2522 //// If this is the first return lowered for this function, add the regs
2523 //// to the liveout set for the function.
2524 // This isn't right, although it's probably harmless on x86; liveouts
2525 // should be computed from returns not tail calls. Consider a void
2526 // function making a tail call to a function returning int.
2527 return DAG.getNode(X86ISD::TC_RETURN, dl,
2528 NodeTys, &Ops[0], Ops.size());
2529 }
2530
2531 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
2532 InFlag = Chain.getValue(1);
2533
2534 // Create the CALLSEQ_END node.
2535 unsigned NumBytesForCalleeToPush;
2536 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2537 getTargetMachine().Options.GuaranteedTailCallOpt))
2538 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
2539 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2540 IsStructRet)
2541 // If this is a call to a struct-return function, the callee
2542 // pops the hidden struct pointer, so we have to push it back.
2543 // This is common for Darwin/X86, Linux & Mingw32 targets.
2544 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
2545 NumBytesForCalleeToPush = 4;
2546 else
2547 NumBytesForCalleeToPush = 0; // Callee pops nothing.
2548
2549 // Returns a flag for retval copy to use.
2550 if (!IsSibcall) {
2551 Chain = DAG.getCALLSEQ_END(Chain,
2552 DAG.getIntPtrConstant(NumBytes, true),
2553 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2554 true),
2555 InFlag);
2556 InFlag = Chain.getValue(1);
2557 }
2558
2559 // Handle result values, copying them out of physregs into vregs that we
2560 // return.
2561 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2562 Ins, dl, DAG, InVals);
2563}
2564
2565
2566//===----------------------------------------------------------------------===//
2567// Fast Calling Convention (tail call) implementation
2568//===----------------------------------------------------------------------===//
2569
2570// Like StdCall, the callee cleans up the arguments, except that ECX is
2571// reserved for storing the address of the tail-called function. Only 2 registers
2572// are free for argument passing (inreg). Tail call optimization is performed
2573// provided:
2574// * tailcallopt is enabled
2575// * caller/callee are fastcc
2576// On the X86_64 architecture with GOT-style position-independent code, only
2577// local (within-module) calls are supported at the moment.
2578// To keep the stack aligned according to the platform ABI, the function
2579// GetAlignedArgumentStackSize ensures that the argument delta is always a
2580// multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld,
2581// for example.) If the tail-called callee has more arguments than the caller,
2582// the caller needs to make sure that there is room to move the RETADDR to.
2583// This is achieved by reserving an area the size of the argument delta right
2584// after the original RETADDR, but before the saved frame pointer or the
2585// spilled registers, e.g.
caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4) 2586// stack layout: 2587// arg1 2588// arg2 2589// RETADDR 2590// [ new RETADDR 2591// move area ] 2592// (possible EBP) 2593// ESI 2594// EDI 2595// local1 .. 2596 2597/// GetAlignedArgumentStackSize - Make the stack size align e.g 16n + 12 aligned 2598/// for a 16 byte align requirement. 2599unsigned 2600X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2601 SelectionDAG& DAG) const { 2602 MachineFunction &MF = DAG.getMachineFunction(); 2603 const TargetMachine &TM = MF.getTarget(); 2604 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 2605 unsigned StackAlignment = TFI.getStackAlignment(); 2606 uint64_t AlignMask = StackAlignment - 1; 2607 int64_t Offset = StackSize; 2608 uint64_t SlotSize = TD->getPointerSize(); 2609 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2610 // Number smaller than 12 so just add the difference. 2611 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2612 } else { 2613 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2614 Offset = ((~AlignMask) & Offset) + StackAlignment + 2615 (StackAlignment-SlotSize); 2616 } 2617 return Offset; 2618} 2619 2620/// MatchingStackOffset - Return true if the given stack call argument is 2621/// already available in the same position (relatively) of the caller's 2622/// incoming argument stack. 2623static 2624bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2625 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2626 const X86InstrInfo *TII) { 2627 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2628 int FI = INT_MAX; 2629 if (Arg.getOpcode() == ISD::CopyFromReg) { 2630 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2631 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2632 return false; 2633 MachineInstr *Def = MRI->getVRegDef(VR); 2634 if (!Def) 2635 return false; 2636 if (!Flags.isByVal()) { 2637 if (!TII->isLoadFromStackSlot(Def, FI)) 2638 return false; 2639 } else { 2640 unsigned Opcode = Def->getOpcode(); 2641 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2642 Def->getOperand(1).isFI()) { 2643 FI = Def->getOperand(1).getIndex(); 2644 Bytes = Flags.getByValSize(); 2645 } else 2646 return false; 2647 } 2648 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2649 if (Flags.isByVal()) 2650 // ByVal argument is passed in as a pointer but it's now being 2651 // dereferenced. e.g. 2652 // define @foo(%struct.X* %A) { 2653 // tail call @bar(%struct.X* byval %A) 2654 // } 2655 return false; 2656 SDValue Ptr = Ld->getBasePtr(); 2657 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2658 if (!FINode) 2659 return false; 2660 FI = FINode->getIndex(); 2661 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2662 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2663 FI = FINode->getIndex(); 2664 Bytes = Flags.getByValSize(); 2665 } else 2666 return false; 2667 2668 assert(FI != INT_MAX); 2669 if (!MFI->isFixedObjectIndex(FI)) 2670 return false; 2671 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2672} 2673 2674/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2675/// for tail call optimization. Targets which want to do tail call 2676/// optimization should implement this function. 
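/// Without -tailcallopt this implements the sibcall checks below: among other
/// things, no dynamic stack realignment, no struct return on either side,
/// return values assigned to the same locations under both calling
/// conventions, and outgoing stack arguments that already occupy the caller's
/// matching fixed argument slots.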
2677bool 2678X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2679 CallingConv::ID CalleeCC, 2680 bool isVarArg, 2681 bool isCalleeStructRet, 2682 bool isCallerStructRet, 2683 const SmallVectorImpl<ISD::OutputArg> &Outs, 2684 const SmallVectorImpl<SDValue> &OutVals, 2685 const SmallVectorImpl<ISD::InputArg> &Ins, 2686 SelectionDAG& DAG) const { 2687 if (!IsTailCallConvention(CalleeCC) && 2688 CalleeCC != CallingConv::C) 2689 return false; 2690 2691 // If -tailcallopt is specified, make fastcc functions tail-callable. 2692 const MachineFunction &MF = DAG.getMachineFunction(); 2693 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2694 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2695 bool CCMatch = CallerCC == CalleeCC; 2696 2697 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2698 if (IsTailCallConvention(CalleeCC) && CCMatch) 2699 return true; 2700 return false; 2701 } 2702 2703 // Look for obvious safe cases to perform tail call optimization that do not 2704 // require ABI changes. This is what gcc calls sibcall. 2705 2706 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2707 // emit a special epilogue. 2708 if (RegInfo->needsStackRealignment(MF)) 2709 return false; 2710 2711 // Also avoid sibcall optimization if either caller or callee uses struct 2712 // return semantics. 2713 if (isCalleeStructRet || isCallerStructRet) 2714 return false; 2715 2716 // An stdcall caller is expected to clean up its arguments; the callee 2717 // isn't going to do that. 2718 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2719 return false; 2720 2721 // Do not sibcall optimize vararg calls unless all arguments are passed via 2722 // registers. 2723 if (isVarArg && !Outs.empty()) { 2724 2725 // Optimizing for varargs on Win64 is unlikely to be safe without 2726 // additional testing. 2727 if (Subtarget->isTargetWin64()) 2728 return false; 2729 2730 SmallVector<CCValAssign, 16> ArgLocs; 2731 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2732 getTargetMachine(), ArgLocs, *DAG.getContext()); 2733 2734 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2735 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2736 if (!ArgLocs[i].isRegLoc()) 2737 return false; 2738 } 2739 2740 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2741 // stack. Therefore, if it's not used by the call it is not safe to optimize 2742 // this into a sibcall. 2743 bool Unused = false; 2744 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2745 if (!Ins[i].Used) { 2746 Unused = true; 2747 break; 2748 } 2749 } 2750 if (Unused) { 2751 SmallVector<CCValAssign, 16> RVLocs; 2752 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2753 getTargetMachine(), RVLocs, *DAG.getContext()); 2754 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2755 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2756 CCValAssign &VA = RVLocs[i]; 2757 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2758 return false; 2759 } 2760 } 2761 2762 // If the calling conventions do not match, then we'd better make sure the 2763 // results are returned in the same way as what the caller expects. 
2764 if (!CCMatch) { 2765 SmallVector<CCValAssign, 16> RVLocs1; 2766 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2767 getTargetMachine(), RVLocs1, *DAG.getContext()); 2768 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2769 2770 SmallVector<CCValAssign, 16> RVLocs2; 2771 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2772 getTargetMachine(), RVLocs2, *DAG.getContext()); 2773 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2774 2775 if (RVLocs1.size() != RVLocs2.size()) 2776 return false; 2777 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2778 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2779 return false; 2780 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2781 return false; 2782 if (RVLocs1[i].isRegLoc()) { 2783 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2784 return false; 2785 } else { 2786 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2787 return false; 2788 } 2789 } 2790 } 2791 2792 // If the callee takes no arguments then go on to check the results of the 2793 // call. 2794 if (!Outs.empty()) { 2795 // Check if stack adjustment is needed. For now, do not do this if any 2796 // argument is passed on the stack. 2797 SmallVector<CCValAssign, 16> ArgLocs; 2798 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2799 getTargetMachine(), ArgLocs, *DAG.getContext()); 2800 2801 // Allocate shadow area for Win64 2802 if (Subtarget->isTargetWin64()) { 2803 CCInfo.AllocateStack(32, 8); 2804 } 2805 2806 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2807 if (CCInfo.getNextStackOffset()) { 2808 MachineFunction &MF = DAG.getMachineFunction(); 2809 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2810 return false; 2811 2812 // Check if the arguments are already laid out in the right way as 2813 // the caller's fixed stack objects. 2814 MachineFrameInfo *MFI = MF.getFrameInfo(); 2815 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2816 const X86InstrInfo *TII = 2817 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2818 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2819 CCValAssign &VA = ArgLocs[i]; 2820 SDValue Arg = OutVals[i]; 2821 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2822 if (VA.getLocInfo() == CCValAssign::Indirect) 2823 return false; 2824 if (!VA.isRegLoc()) { 2825 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2826 MFI, MRI, TII)) 2827 return false; 2828 } 2829 } 2830 } 2831 2832 // If the tailcall address may be in a register, then make sure it's 2833 // possible to register allocate for it. In 32-bit, the call address can 2834 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2835 // callee-saved registers are restored. These happen to be the same 2836 // registers used to pass 'inreg' arguments so watch out for those. 
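// E.g. an indirect tail call whose 'inreg' arguments already occupy ECX and
// EDX leaves only EAX for the call target; if all three registers carry
// arguments, the sibcall is rejected below.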
2837 if (!Subtarget->is64Bit() && 2838 !isa<GlobalAddressSDNode>(Callee) && 2839 !isa<ExternalSymbolSDNode>(Callee)) { 2840 unsigned NumInRegs = 0; 2841 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2842 CCValAssign &VA = ArgLocs[i]; 2843 if (!VA.isRegLoc()) 2844 continue; 2845 unsigned Reg = VA.getLocReg(); 2846 switch (Reg) { 2847 default: break; 2848 case X86::EAX: case X86::EDX: case X86::ECX: 2849 if (++NumInRegs == 3) 2850 return false; 2851 break; 2852 } 2853 } 2854 } 2855 } 2856 2857 return true; 2858} 2859 2860FastISel * 2861X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 2862 return X86::createFastISel(funcInfo); 2863} 2864 2865 2866//===----------------------------------------------------------------------===// 2867// Other Lowering Hooks 2868//===----------------------------------------------------------------------===// 2869 2870static bool MayFoldLoad(SDValue Op) { 2871 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2872} 2873 2874static bool MayFoldIntoStore(SDValue Op) { 2875 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2876} 2877 2878static bool isTargetShuffle(unsigned Opcode) { 2879 switch(Opcode) { 2880 default: return false; 2881 case X86ISD::PSHUFD: 2882 case X86ISD::PSHUFHW: 2883 case X86ISD::PSHUFLW: 2884 case X86ISD::SHUFP: 2885 case X86ISD::PALIGN: 2886 case X86ISD::MOVLHPS: 2887 case X86ISD::MOVLHPD: 2888 case X86ISD::MOVHLPS: 2889 case X86ISD::MOVLPS: 2890 case X86ISD::MOVLPD: 2891 case X86ISD::MOVSHDUP: 2892 case X86ISD::MOVSLDUP: 2893 case X86ISD::MOVDDUP: 2894 case X86ISD::MOVSS: 2895 case X86ISD::MOVSD: 2896 case X86ISD::UNPCKL: 2897 case X86ISD::UNPCKH: 2898 case X86ISD::VPERMILP: 2899 case X86ISD::VPERM2X128: 2900 return true; 2901 } 2902} 2903 2904static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2905 SDValue V1, SelectionDAG &DAG) { 2906 switch(Opc) { 2907 default: llvm_unreachable("Unknown x86 shuffle node"); 2908 case X86ISD::MOVSHDUP: 2909 case X86ISD::MOVSLDUP: 2910 case X86ISD::MOVDDUP: 2911 return DAG.getNode(Opc, dl, VT, V1); 2912 } 2913} 2914 2915static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2916 SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { 2917 switch(Opc) { 2918 default: llvm_unreachable("Unknown x86 shuffle node"); 2919 case X86ISD::PSHUFD: 2920 case X86ISD::PSHUFHW: 2921 case X86ISD::PSHUFLW: 2922 case X86ISD::VPERMILP: 2923 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 2924 } 2925} 2926 2927static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2928 SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { 2929 switch(Opc) { 2930 default: llvm_unreachable("Unknown x86 shuffle node"); 2931 case X86ISD::PALIGN: 2932 case X86ISD::SHUFP: 2933 case X86ISD::VPERM2X128: 2934 return DAG.getNode(Opc, dl, VT, V1, V2, 2935 DAG.getConstant(TargetMask, MVT::i8)); 2936 } 2937} 2938 2939static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2940 SDValue V1, SDValue V2, SelectionDAG &DAG) { 2941 switch(Opc) { 2942 default: llvm_unreachable("Unknown x86 shuffle node"); 2943 case X86ISD::MOVLHPS: 2944 case X86ISD::MOVLHPD: 2945 case X86ISD::MOVHLPS: 2946 case X86ISD::MOVLPS: 2947 case X86ISD::MOVLPD: 2948 case X86ISD::MOVSS: 2949 case X86ISD::MOVSD: 2950 case X86ISD::UNPCKL: 2951 case X86ISD::UNPCKH: 2952 return DAG.getNode(Opc, dl, VT, V1, V2); 2953 } 2954} 2955 2956SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 2957 
MachineFunction &MF = DAG.getMachineFunction(); 2958 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2959 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2960 2961 if (ReturnAddrIndex == 0) { 2962 // Set up a frame object for the return address. 2963 uint64_t SlotSize = TD->getPointerSize(); 2964 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 2965 false); 2966 FuncInfo->setRAIndex(ReturnAddrIndex); 2967 } 2968 2969 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 2970} 2971 2972 2973bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 2974 bool hasSymbolicDisplacement) { 2975 // Offset should fit into 32 bit immediate field. 2976 if (!isInt<32>(Offset)) 2977 return false; 2978 2979 // If we don't have a symbolic displacement - we don't have any extra 2980 // restrictions. 2981 if (!hasSymbolicDisplacement) 2982 return true; 2983 2984 // FIXME: Some tweaks might be needed for medium code model. 2985 if (M != CodeModel::Small && M != CodeModel::Kernel) 2986 return false; 2987 2988 // For small code model we assume that latest object is 16MB before end of 31 2989 // bits boundary. We may also accept pretty large negative constants knowing 2990 // that all objects are in the positive half of address space. 2991 if (M == CodeModel::Small && Offset < 16*1024*1024) 2992 return true; 2993 2994 // For kernel code model we know that all object resist in the negative half 2995 // of 32bits address space. We may not accept negative offsets, since they may 2996 // be just off and we may accept pretty large positive ones. 2997 if (M == CodeModel::Kernel && Offset > 0) 2998 return true; 2999 3000 return false; 3001} 3002 3003/// isCalleePop - Determines whether the callee is required to pop its 3004/// own arguments. Callee pop is necessary to support tail calls. 3005bool X86::isCalleePop(CallingConv::ID CallingConv, 3006 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3007 if (IsVarArg) 3008 return false; 3009 3010 switch (CallingConv) { 3011 default: 3012 return false; 3013 case CallingConv::X86_StdCall: 3014 return !is64Bit; 3015 case CallingConv::X86_FastCall: 3016 return !is64Bit; 3017 case CallingConv::X86_ThisCall: 3018 return !is64Bit; 3019 case CallingConv::Fast: 3020 return TailCallOpt; 3021 case CallingConv::GHC: 3022 return TailCallOpt; 3023 } 3024} 3025 3026/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3027/// specific condition code, returning the condition code and the LHS/RHS of the 3028/// comparison to make. 3029static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3030 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3031 if (!isFP) { 3032 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3033 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3034 // X > -1 -> X == 0, jump !sign. 3035 RHS = DAG.getConstant(0, RHS.getValueType()); 3036 return X86::COND_NS; 3037 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3038 // X < 0 -> X == 0, jump on sign. 
3039 return X86::COND_S; 3040 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3041 // X < 1 -> X <= 0 3042 RHS = DAG.getConstant(0, RHS.getValueType()); 3043 return X86::COND_LE; 3044 } 3045 } 3046 3047 switch (SetCCOpcode) { 3048 default: llvm_unreachable("Invalid integer condition!"); 3049 case ISD::SETEQ: return X86::COND_E; 3050 case ISD::SETGT: return X86::COND_G; 3051 case ISD::SETGE: return X86::COND_GE; 3052 case ISD::SETLT: return X86::COND_L; 3053 case ISD::SETLE: return X86::COND_LE; 3054 case ISD::SETNE: return X86::COND_NE; 3055 case ISD::SETULT: return X86::COND_B; 3056 case ISD::SETUGT: return X86::COND_A; 3057 case ISD::SETULE: return X86::COND_BE; 3058 case ISD::SETUGE: return X86::COND_AE; 3059 } 3060 } 3061 3062 // First determine if it is required or is profitable to flip the operands. 3063 3064 // If LHS is a foldable load, but RHS is not, flip the condition. 3065 if (ISD::isNON_EXTLoad(LHS.getNode()) && 3066 !ISD::isNON_EXTLoad(RHS.getNode())) { 3067 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 3068 std::swap(LHS, RHS); 3069 } 3070 3071 switch (SetCCOpcode) { 3072 default: break; 3073 case ISD::SETOLT: 3074 case ISD::SETOLE: 3075 case ISD::SETUGT: 3076 case ISD::SETUGE: 3077 std::swap(LHS, RHS); 3078 break; 3079 } 3080 3081 // On a floating point condition, the flags are set as follows: 3082 // ZF PF CF op 3083 // 0 | 0 | 0 | X > Y 3084 // 0 | 0 | 1 | X < Y 3085 // 1 | 0 | 0 | X == Y 3086 // 1 | 1 | 1 | unordered 3087 switch (SetCCOpcode) { 3088 default: llvm_unreachable("Condcode should be pre-legalized away"); 3089 case ISD::SETUEQ: 3090 case ISD::SETEQ: return X86::COND_E; 3091 case ISD::SETOLT: // flipped 3092 case ISD::SETOGT: 3093 case ISD::SETGT: return X86::COND_A; 3094 case ISD::SETOLE: // flipped 3095 case ISD::SETOGE: 3096 case ISD::SETGE: return X86::COND_AE; 3097 case ISD::SETUGT: // flipped 3098 case ISD::SETULT: 3099 case ISD::SETLT: return X86::COND_B; 3100 case ISD::SETUGE: // flipped 3101 case ISD::SETULE: 3102 case ISD::SETLE: return X86::COND_BE; 3103 case ISD::SETONE: 3104 case ISD::SETNE: return X86::COND_NE; 3105 case ISD::SETUO: return X86::COND_P; 3106 case ISD::SETO: return X86::COND_NP; 3107 case ISD::SETOEQ: 3108 case ISD::SETUNE: return X86::COND_INVALID; 3109 } 3110} 3111 3112/// hasFPCMov - is there a floating point cmov for the specific X86 condition 3113/// code. Current x86 isa includes the following FP cmov instructions: 3114/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 3115static bool hasFPCMov(unsigned X86CC) { 3116 switch (X86CC) { 3117 default: 3118 return false; 3119 case X86::COND_B: 3120 case X86::COND_BE: 3121 case X86::COND_E: 3122 case X86::COND_P: 3123 case X86::COND_A: 3124 case X86::COND_AE: 3125 case X86::COND_NE: 3126 case X86::COND_NP: 3127 return true; 3128 } 3129} 3130 3131/// isFPImmLegal - Returns true if the target can instruction select the 3132/// specified FP immediate natively. If false, the legalizer will 3133/// materialize the FP immediate as a load from a constant pool. 3134bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3135 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3136 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3137 return true; 3138 } 3139 return false; 3140} 3141 3142/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3143/// the specified range (L, H]. 
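// A minimal standalone sketch of the mask-element conventions used by the
// predicates that follow: a negative mask element means "undef" and matches
// anything, and range tests are half-open, i.e. [Low, Hi). Plain ints stand
// in for shuffle mask elements; the names below are illustrative only.
#include <cassert>

static bool sketchIsUndefOrInRange(int Val, int Low, int Hi) {
  return Val < 0 || (Val >= Low && Val < Hi);
}

static void sketchMaskConventionExamples() {
  assert(sketchIsUndefOrInRange(-1, 4, 8));  // undef matches any range
  assert(sketchIsUndefOrInRange(5, 4, 8));   // 5 lies in [4, 8)
  assert(!sketchIsUndefOrInRange(8, 4, 8));  // the upper bound is exclusive
}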
3144static bool isUndefOrInRange(int Val, int Low, int Hi) { 3145 return (Val < 0) || (Val >= Low && Val < Hi); 3146} 3147 3148/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 3149/// specified value. 3150static bool isUndefOrEqual(int Val, int CmpVal) { 3151 if (Val < 0 || Val == CmpVal) 3152 return true; 3153 return false; 3154} 3155 3156/// isSequentialOrUndefInRange - Return true if every element in Mask, begining 3157/// from position Pos and ending in Pos+Size, falls within the specified 3158/// sequential range (L, L+Pos]. or is undef. 3159static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, 3160 int Pos, int Size, int Low) { 3161 for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low) 3162 if (!isUndefOrEqual(Mask[i], Low)) 3163 return false; 3164 return true; 3165} 3166 3167/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 3168/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 3169/// the second operand. 3170static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3171 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3172 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3173 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3174 return (Mask[0] < 2 && Mask[1] < 2); 3175 return false; 3176} 3177 3178bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) { 3179 return ::isPSHUFDMask(N->getMask(), N->getValueType(0)); 3180} 3181 3182/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3183/// is suitable for input to PSHUFHW. 3184static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT) { 3185 if (VT != MVT::v8i16) 3186 return false; 3187 3188 // Lower quadword copied in order or undef. 3189 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3190 return false; 3191 3192 // Upper quadword shuffled. 3193 for (unsigned i = 4; i != 8; ++i) 3194 if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7)) 3195 return false; 3196 3197 return true; 3198} 3199 3200bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) { 3201 return ::isPSHUFHWMask(N->getMask(), N->getValueType(0)); 3202} 3203 3204/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3205/// is suitable for input to PSHUFLW. 3206static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT) { 3207 if (VT != MVT::v8i16) 3208 return false; 3209 3210 // Upper quadword copied in order. 3211 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3212 return false; 3213 3214 // Lower quadword shuffled. 3215 for (unsigned i = 0; i != 4; ++i) 3216 if (Mask[i] >= 4) 3217 return false; 3218 3219 return true; 3220} 3221 3222bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) { 3223 return ::isPSHUFLWMask(N->getMask(), N->getValueType(0)); 3224} 3225 3226/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3227/// is suitable for input to PALIGNR. 3228static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3229 const X86Subtarget *Subtarget) { 3230 if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) || 3231 (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())) 3232 return false; 3233 3234 unsigned NumElts = VT.getVectorNumElements(); 3235 unsigned NumLanes = VT.getSizeInBits()/128; 3236 unsigned NumLaneElts = NumElts/NumLanes; 3237 3238 // Do not handle 64-bit element shuffles with palignr. 
3239 if (NumLaneElts == 2) 3240 return false; 3241 3242 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3243 unsigned i; 3244 for (i = 0; i != NumLaneElts; ++i) { 3245 if (Mask[i+l] >= 0) 3246 break; 3247 } 3248 3249 // Lane is all undef, go to next lane 3250 if (i == NumLaneElts) 3251 continue; 3252 3253 int Start = Mask[i+l]; 3254 3255 // Make sure its in this lane in one of the sources 3256 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3257 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3258 return false; 3259 3260 // If not lane 0, then we must match lane 0 3261 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3262 return false; 3263 3264 // Correct second source to be contiguous with first source 3265 if (Start >= (int)NumElts) 3266 Start -= NumElts - NumLaneElts; 3267 3268 // Make sure we're shifting in the right direction. 3269 if (Start <= (int)(i+l)) 3270 return false; 3271 3272 Start -= i; 3273 3274 // Check the rest of the elements to see if they are consecutive. 3275 for (++i; i != NumLaneElts; ++i) { 3276 int Idx = Mask[i+l]; 3277 3278 // Make sure its in this lane 3279 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) && 3280 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts)) 3281 return false; 3282 3283 // If not lane 0, then we must match lane 0 3284 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l)) 3285 return false; 3286 3287 if (Idx >= (int)NumElts) 3288 Idx -= NumElts - NumLaneElts; 3289 3290 if (!isUndefOrEqual(Idx, Start+i)) 3291 return false; 3292 3293 } 3294 } 3295 3296 return true; 3297} 3298 3299/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3300/// the two vector operands have swapped position. 3301static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3302 unsigned NumElems) { 3303 for (unsigned i = 0; i != NumElems; ++i) { 3304 int idx = Mask[i]; 3305 if (idx < 0) 3306 continue; 3307 else if (idx < (int)NumElems) 3308 Mask[i] = idx + NumElems; 3309 else 3310 Mask[i] = idx - NumElems; 3311 } 3312} 3313 3314/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3315/// specifies a shuffle of elements that is suitable for input to 128/256-bit 3316/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3317/// reverse of what x86 shuffles want. 3318static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX, 3319 bool Commuted = false) { 3320 if (!HasAVX && VT.getSizeInBits() == 256) 3321 return false; 3322 3323 unsigned NumElems = VT.getVectorNumElements(); 3324 unsigned NumLanes = VT.getSizeInBits()/128; 3325 unsigned NumLaneElems = NumElems/NumLanes; 3326 3327 if (NumLaneElems != 2 && NumLaneElems != 4) 3328 return false; 3329 3330 // VSHUFPSY divides the resulting vector into 4 chunks. 3331 // The sources are also splitted into 4 chunks, and each destination 3332 // chunk must come from a different source chunk. 3333 // 3334 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0 3335 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9 3336 // 3337 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4, 3338 // Y3..Y0, Y3..Y0, X3..X0, X3..X0 3339 // 3340 // VSHUFPDY divides the resulting vector into 4 chunks. 3341 // The sources are also splitted into 4 chunks, and each destination 3342 // chunk must come from a different source chunk. 
3343 // 3344 // SRC1 => X3 X2 X1 X0 3345 // SRC2 => Y3 Y2 Y1 Y0 3346 // 3347 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3348 // 3349 unsigned HalfLaneElems = NumLaneElems/2; 3350 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3351 for (unsigned i = 0; i != NumLaneElems; ++i) { 3352 int Idx = Mask[i+l]; 3353 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3354 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3355 return false; 3356 // For VSHUFPSY, the mask of the second half must be the same as the 3357 // first but with the appropriate offsets. This works in the same way as 3358 // VPERMILPS works with masks. 3359 if (NumElems != 8 || l == 0 || Mask[i] < 0) 3360 continue; 3361 if (!isUndefOrEqual(Idx, Mask[i]+l)) 3362 return false; 3363 } 3364 } 3365 3366 return true; 3367} 3368 3369bool X86::isSHUFPMask(ShuffleVectorSDNode *N, bool HasAVX) { 3370 return ::isSHUFPMask(N->getMask(), N->getValueType(0), HasAVX); 3371} 3372 3373/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3374/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3375bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) { 3376 EVT VT = N->getValueType(0); 3377 unsigned NumElems = VT.getVectorNumElements(); 3378 3379 if (VT.getSizeInBits() != 128) 3380 return false; 3381 3382 if (NumElems != 4) 3383 return false; 3384 3385 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3386 return isUndefOrEqual(N->getMaskElt(0), 6) && 3387 isUndefOrEqual(N->getMaskElt(1), 7) && 3388 isUndefOrEqual(N->getMaskElt(2), 2) && 3389 isUndefOrEqual(N->getMaskElt(3), 3); 3390} 3391 3392/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3393/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3394/// <2, 3, 2, 3> 3395bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) { 3396 EVT VT = N->getValueType(0); 3397 unsigned NumElems = VT.getVectorNumElements(); 3398 3399 if (VT.getSizeInBits() != 128) 3400 return false; 3401 3402 if (NumElems != 4) 3403 return false; 3404 3405 return isUndefOrEqual(N->getMaskElt(0), 2) && 3406 isUndefOrEqual(N->getMaskElt(1), 3) && 3407 isUndefOrEqual(N->getMaskElt(2), 2) && 3408 isUndefOrEqual(N->getMaskElt(3), 3); 3409} 3410 3411/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3412/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3413bool X86::isMOVLPMask(ShuffleVectorSDNode *N) { 3414 EVT VT = N->getValueType(0); 3415 3416 if (VT.getSizeInBits() != 128) 3417 return false; 3418 3419 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3420 3421 if (NumElems != 2 && NumElems != 4) 3422 return false; 3423 3424 for (unsigned i = 0; i < NumElems/2; ++i) 3425 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems)) 3426 return false; 3427 3428 for (unsigned i = NumElems/2; i < NumElems; ++i) 3429 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3430 return false; 3431 3432 return true; 3433} 3434 3435/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3436/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 
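// The MOVHLPS check above and the MOVLHPS check below both reduce to matching
// a fixed 4-element pattern while tolerating undef (-1) entries. A standalone
// sketch with plain int masks, where elements 0-3 select from V1 and 4-7 from
// V2 (names are illustrative, not the SDNode API):
#include <cassert>

static bool sketchMatchesFixedMask(const int Mask[4], const int Want[4]) {
  for (int i = 0; i != 4; ++i)
    if (Mask[i] >= 0 && Mask[i] != Want[i])
      return false;
  return true;
}

static void sketchMovHLtoLHExamples() {
  const int MovLHPS[4] = { 0, 1, 4, 5 };  // low half of V1, low half of V2
  const int MovHLPS[4] = { 6, 7, 2, 3 };  // high half of V2, high half of V1
  const int M1[4] = { 0, -1, 4, 5 };      // undef in slot 1 still matches
  assert(sketchMatchesFixedMask(M1, MovLHPS));
  const int M2[4] = { 6, 7, 2, 0 };       // slot 3 must be 3, not 0
  assert(!sketchMatchesFixedMask(M2, MovHLPS));
}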
3437bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) { 3438 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3439 3440 if ((NumElems != 2 && NumElems != 4) 3441 || N->getValueType(0).getSizeInBits() > 128) 3442 return false; 3443 3444 for (unsigned i = 0; i < NumElems/2; ++i) 3445 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3446 return false; 3447 3448 for (unsigned i = 0; i < NumElems/2; ++i) 3449 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems)) 3450 return false; 3451 3452 return true; 3453} 3454 3455/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3456/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3457static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3458 bool HasAVX2, bool V2IsSplat = false) { 3459 unsigned NumElts = VT.getVectorNumElements(); 3460 3461 assert((VT.is128BitVector() || VT.is256BitVector()) && 3462 "Unsupported vector type for unpckh"); 3463 3464 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3465 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3466 return false; 3467 3468 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3469 // independently on 128-bit lanes. 3470 unsigned NumLanes = VT.getSizeInBits()/128; 3471 unsigned NumLaneElts = NumElts/NumLanes; 3472 3473 for (unsigned l = 0; l != NumLanes; ++l) { 3474 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3475 i != (l+1)*NumLaneElts; 3476 i += 2, ++j) { 3477 int BitI = Mask[i]; 3478 int BitI1 = Mask[i+1]; 3479 if (!isUndefOrEqual(BitI, j)) 3480 return false; 3481 if (V2IsSplat) { 3482 if (!isUndefOrEqual(BitI1, NumElts)) 3483 return false; 3484 } else { 3485 if (!isUndefOrEqual(BitI1, j + NumElts)) 3486 return false; 3487 } 3488 } 3489 } 3490 3491 return true; 3492} 3493 3494bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool HasAVX2, bool V2IsSplat) { 3495 return ::isUNPCKLMask(N->getMask(), N->getValueType(0), HasAVX2, V2IsSplat); 3496} 3497 3498/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3499/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3500static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3501 bool HasAVX2, bool V2IsSplat = false) { 3502 unsigned NumElts = VT.getVectorNumElements(); 3503 3504 assert((VT.is128BitVector() || VT.is256BitVector()) && 3505 "Unsupported vector type for unpckh"); 3506 3507 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3508 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3509 return false; 3510 3511 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3512 // independently on 128-bit lanes. 3513 unsigned NumLanes = VT.getSizeInBits()/128; 3514 unsigned NumLaneElts = NumElts/NumLanes; 3515 3516 for (unsigned l = 0; l != NumLanes; ++l) { 3517 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3518 i != (l+1)*NumLaneElts; i += 2, ++j) { 3519 int BitI = Mask[i]; 3520 int BitI1 = Mask[i+1]; 3521 if (!isUndefOrEqual(BitI, j)) 3522 return false; 3523 if (V2IsSplat) { 3524 if (isUndefOrEqual(BitI1, NumElts)) 3525 return false; 3526 } else { 3527 if (!isUndefOrEqual(BitI1, j+NumElts)) 3528 return false; 3529 } 3530 } 3531 } 3532 return true; 3533} 3534 3535bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool HasAVX2, bool V2IsSplat) { 3536 return ::isUNPCKHMask(N->getMask(), N->getValueType(0), HasAVX2, V2IsSplat); 3537} 3538 3539/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3540/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. 
vector_shuffle v, undef, 3541/// <0, 0, 1, 1> 3542static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, 3543 bool HasAVX2) { 3544 unsigned NumElts = VT.getVectorNumElements(); 3545 3546 assert((VT.is128BitVector() || VT.is256BitVector()) && 3547 "Unsupported vector type for unpckh"); 3548 3549 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3550 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3551 return false; 3552 3553 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3554 // FIXME: Need a better way to get rid of this, there's no latency difference 3555 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3556 // the former later. We should also remove the "_undef" special mask. 3557 if (NumElts == 4 && VT.getSizeInBits() == 256) 3558 return false; 3559 3560 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3561 // independently on 128-bit lanes. 3562 unsigned NumLanes = VT.getSizeInBits()/128; 3563 unsigned NumLaneElts = NumElts/NumLanes; 3564 3565 for (unsigned l = 0; l != NumLanes; ++l) { 3566 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3567 i != (l+1)*NumLaneElts; 3568 i += 2, ++j) { 3569 int BitI = Mask[i]; 3570 int BitI1 = Mask[i+1]; 3571 3572 if (!isUndefOrEqual(BitI, j)) 3573 return false; 3574 if (!isUndefOrEqual(BitI1, j)) 3575 return false; 3576 } 3577 } 3578 3579 return true; 3580} 3581 3582bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2) { 3583 return ::isUNPCKL_v_undef_Mask(N->getMask(), N->getValueType(0), HasAVX2); 3584} 3585 3586/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3587/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3588/// <2, 2, 3, 3> 3589static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3590 unsigned NumElts = VT.getVectorNumElements(); 3591 3592 assert((VT.is128BitVector() || VT.is256BitVector()) && 3593 "Unsupported vector type for unpckh"); 3594 3595 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3596 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3597 return false; 3598 3599 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3600 // independently on 128-bit lanes. 3601 unsigned NumLanes = VT.getSizeInBits()/128; 3602 unsigned NumLaneElts = NumElts/NumLanes; 3603 3604 for (unsigned l = 0; l != NumLanes; ++l) { 3605 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3606 i != (l+1)*NumLaneElts; i += 2, ++j) { 3607 int BitI = Mask[i]; 3608 int BitI1 = Mask[i+1]; 3609 if (!isUndefOrEqual(BitI, j)) 3610 return false; 3611 if (!isUndefOrEqual(BitI1, j)) 3612 return false; 3613 } 3614 } 3615 return true; 3616} 3617 3618bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2) { 3619 return ::isUNPCKH_v_undef_Mask(N->getMask(), N->getValueType(0), HasAVX2); 3620} 3621 3622/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3623/// specifies a shuffle of elements that is suitable for input to MOVSS, 3624/// MOVSD, and MOVD, i.e. setting the lowest element. 
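// The MOVL check described above reduces to: element 0 of the result comes
// from the first element of V2 (mask value NumElts) and every other element is
// taken from V1 in place. A standalone sketch over a plain int mask
// (illustrative names only):
#include <cassert>

static bool sketchIsMOVLMask(const int *Mask, int NumElts) {
  if (Mask[0] >= 0 && Mask[0] != NumElts)
    return false;
  for (int i = 1; i != NumElts; ++i)
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;
  return true;
}

static void sketchMOVLExamples() {
  const int Movss[4] = { 4, 1, 2, 3 };   // v4f32 MOVSS-style pattern
  assert(sketchIsMOVLMask(Movss, 4));
  const int NotMovl[4] = { 4, 1, 2, 0 }; // tail not taken in place from V1
  assert(!sketchIsMOVLMask(NotMovl, 4));
}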
3625static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 3626 if (VT.getVectorElementType().getSizeInBits() < 32) 3627 return false; 3628 if (VT.getSizeInBits() == 256) 3629 return false; 3630 3631 unsigned NumElts = VT.getVectorNumElements(); 3632 3633 if (!isUndefOrEqual(Mask[0], NumElts)) 3634 return false; 3635 3636 for (unsigned i = 1; i != NumElts; ++i) 3637 if (!isUndefOrEqual(Mask[i], i)) 3638 return false; 3639 3640 return true; 3641} 3642 3643bool X86::isMOVLMask(ShuffleVectorSDNode *N) { 3644 return ::isMOVLMask(N->getMask(), N->getValueType(0)); 3645} 3646 3647/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3648/// as permutations between 128-bit chunks or halves. As an example: this 3649/// shuffle bellow: 3650/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15> 3651/// The first half comes from the second half of V1 and the second half from the 3652/// the second half of V2. 3653static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3654 if (!HasAVX || VT.getSizeInBits() != 256) 3655 return false; 3656 3657 // The shuffle result is divided into half A and half B. In total the two 3658 // sources have 4 halves, namely: C, D, E, F. The final values of A and 3659 // B must come from C, D, E or F. 3660 unsigned HalfSize = VT.getVectorNumElements()/2; 3661 bool MatchA = false, MatchB = false; 3662 3663 // Check if A comes from one of C, D, E, F. 3664 for (unsigned Half = 0; Half != 4; ++Half) { 3665 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) { 3666 MatchA = true; 3667 break; 3668 } 3669 } 3670 3671 // Check if B comes from one of C, D, E, F. 3672 for (unsigned Half = 0; Half != 4; ++Half) { 3673 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) { 3674 MatchB = true; 3675 break; 3676 } 3677 } 3678 3679 return MatchA && MatchB; 3680} 3681 3682/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle 3683/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions. 3684static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) { 3685 EVT VT = SVOp->getValueType(0); 3686 3687 unsigned HalfSize = VT.getVectorNumElements()/2; 3688 3689 unsigned FstHalf = 0, SndHalf = 0; 3690 for (unsigned i = 0; i < HalfSize; ++i) { 3691 if (SVOp->getMaskElt(i) > 0) { 3692 FstHalf = SVOp->getMaskElt(i)/HalfSize; 3693 break; 3694 } 3695 } 3696 for (unsigned i = HalfSize; i < HalfSize*2; ++i) { 3697 if (SVOp->getMaskElt(i) > 0) { 3698 SndHalf = SVOp->getMaskElt(i)/HalfSize; 3699 break; 3700 } 3701 } 3702 3703 return (FstHalf | (SndHalf << 4)); 3704} 3705 3706/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand 3707/// specifies a shuffle of elements that is suitable for input to VPERMILPD*. 3708/// Note that VPERMIL mask matching is different depending whether theunderlying 3709/// type is 32 or 64. In the VPERMILPS the high half of the mask should point 3710/// to the same elements of the low, but to the higher half of the source. 3711/// In VPERMILPD the two lanes could be shuffled independently of each other 3712/// with the same restriction that lanes can't be crossed. 
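// A standalone model of the VPERMILPS constraint described above for v8f32:
// every index stays inside its own 128-bit lane, and the high lane must use
// the same per-lane pattern as the low lane (shifted by 4). Plain int masks;
// the names are illustrative only, not the real predicate:
#include <cassert>

static bool sketchIsVPERMILPSMask(const int Mask[8]) {
  for (int l = 0; l != 8; l += 4) {        // two 128-bit lanes of 4 floats
    for (int i = 0; i != 4; ++i) {
      int M = Mask[l + i];
      if (M < 0)
        continue;                          // undef is always fine
      if (M < l || M >= l + 4)
        return false;                      // may not cross the lane
      if (l != 0 && Mask[i] >= 0 && M != Mask[i] + l)
        return false;                      // high lane must mirror low lane
    }
  }
  return true;
}

static void sketchVPERMILPSExamples() {
  const int Ok[8]  = { 3, 0, 2, 1, 7, 4, 6, 5 }; // same pattern in both lanes
  const int Bad[8] = { 3, 0, 2, 1, 7, 4, 6, 0 }; // last element crosses lanes
  assert(sketchIsVPERMILPSMask(Ok));
  assert(!sketchIsVPERMILPSMask(Bad));
}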
3713static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3714 if (!HasAVX) 3715 return false; 3716 3717 unsigned NumElts = VT.getVectorNumElements(); 3718 // Only match 256-bit with 32/64-bit types 3719 if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8)) 3720 return false; 3721 3722 unsigned NumLanes = VT.getSizeInBits()/128; 3723 unsigned LaneSize = NumElts/NumLanes; 3724 for (unsigned l = 0; l != NumElts; l += LaneSize) { 3725 for (unsigned i = 0; i != LaneSize; ++i) { 3726 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize)) 3727 return false; 3728 if (NumElts != 8 || l == 0) 3729 continue; 3730 // VPERMILPS handling 3731 if (Mask[i] < 0) 3732 continue; 3733 if (!isUndefOrEqual(Mask[i+l], Mask[i]+l)) 3734 return false; 3735 } 3736 } 3737 3738 return true; 3739} 3740 3741/// getShuffleVPERMILPImmediate - Return the appropriate immediate to shuffle 3742/// the specified VECTOR_MASK mask with VPERMILPS/D* instructions. 3743static unsigned getShuffleVPERMILPImmediate(ShuffleVectorSDNode *SVOp) { 3744 EVT VT = SVOp->getValueType(0); 3745 3746 unsigned NumElts = VT.getVectorNumElements(); 3747 unsigned NumLanes = VT.getSizeInBits()/128; 3748 unsigned LaneSize = NumElts/NumLanes; 3749 3750 // Although the mask is equal for both lanes do it twice to get the cases 3751 // where a mask will match because the same mask element is undef on the 3752 // first half but valid on the second. This would get pathological cases 3753 // such as: shuffle <u, 0, 1, 2, 4, 4, 5, 6>, which is completely valid. 3754 unsigned Shift = (LaneSize == 4) ? 2 : 1; 3755 unsigned Mask = 0; 3756 for (unsigned i = 0; i != NumElts; ++i) { 3757 int MaskElt = SVOp->getMaskElt(i); 3758 if (MaskElt < 0) 3759 continue; 3760 MaskElt %= LaneSize; 3761 unsigned Shamt = i; 3762 // VPERMILPSY, the mask of the first half must be equal to the second one 3763 if (NumElts == 8) Shamt %= LaneSize; 3764 Mask |= MaskElt << (Shamt*Shift); 3765 } 3766 3767 return Mask; 3768} 3769 3770/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 3771/// of what x86 movss want. X86 movs requires the lowest element to be lowest 3772/// element of vector 2 and the other elements to come from vector 1 in order. 3773static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT, 3774 bool V2IsSplat = false, bool V2IsUndef = false) { 3775 unsigned NumOps = VT.getVectorNumElements(); 3776 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3777 return false; 3778 3779 if (!isUndefOrEqual(Mask[0], 0)) 3780 return false; 3781 3782 for (unsigned i = 1; i != NumOps; ++i) 3783 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3784 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3785 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3786 return false; 3787 3788 return true; 3789} 3790 3791static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false, 3792 bool V2IsUndef = false) { 3793 return isCommutedMOVLMask(N->getMask(), N->getValueType(0), 3794 V2IsSplat, V2IsUndef); 3795} 3796 3797/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3798/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 
3799/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3800bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N, 3801 const X86Subtarget *Subtarget) { 3802 if (!Subtarget->hasSSE3()) 3803 return false; 3804 3805 // The second vector must be undef 3806 if (N->getOperand(1).getOpcode() != ISD::UNDEF) 3807 return false; 3808 3809 EVT VT = N->getValueType(0); 3810 unsigned NumElems = VT.getVectorNumElements(); 3811 3812 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3813 (VT.getSizeInBits() == 256 && NumElems != 8)) 3814 return false; 3815 3816 // "i+1" is the value the indexed mask element must have 3817 for (unsigned i = 0; i < NumElems; i += 2) 3818 if (!isUndefOrEqual(N->getMaskElt(i), i+1) || 3819 !isUndefOrEqual(N->getMaskElt(i+1), i+1)) 3820 return false; 3821 3822 return true; 3823} 3824 3825/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3826/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3827/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3828bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N, 3829 const X86Subtarget *Subtarget) { 3830 if (!Subtarget->hasSSE3()) 3831 return false; 3832 3833 // The second vector must be undef 3834 if (N->getOperand(1).getOpcode() != ISD::UNDEF) 3835 return false; 3836 3837 EVT VT = N->getValueType(0); 3838 unsigned NumElems = VT.getVectorNumElements(); 3839 3840 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3841 (VT.getSizeInBits() == 256 && NumElems != 8)) 3842 return false; 3843 3844 // "i" is the value the indexed mask element must have 3845 for (unsigned i = 0; i != NumElems; i += 2) 3846 if (!isUndefOrEqual(N->getMaskElt(i), i) || 3847 !isUndefOrEqual(N->getMaskElt(i+1), i)) 3848 return false; 3849 3850 return true; 3851} 3852 3853/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 3854/// specifies a shuffle of elements that is suitable for input to 256-bit 3855/// version of MOVDDUP. 3856static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3857 unsigned NumElts = VT.getVectorNumElements(); 3858 3859 if (!HasAVX || VT.getSizeInBits() != 256 || NumElts != 4) 3860 return false; 3861 3862 for (unsigned i = 0; i != NumElts/2; ++i) 3863 if (!isUndefOrEqual(Mask[i], 0)) 3864 return false; 3865 for (unsigned i = NumElts/2; i != NumElts; ++i) 3866 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3867 return false; 3868 return true; 3869} 3870 3871/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3872/// specifies a shuffle of elements that is suitable for input to 128-bit 3873/// version of MOVDDUP. 3874bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) { 3875 EVT VT = N->getValueType(0); 3876 3877 if (VT.getSizeInBits() != 128) 3878 return false; 3879 3880 unsigned e = VT.getVectorNumElements() / 2; 3881 for (unsigned i = 0; i != e; ++i) 3882 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3883 return false; 3884 for (unsigned i = 0; i != e; ++i) 3885 if (!isUndefOrEqual(N->getMaskElt(e+i), i)) 3886 return false; 3887 return true; 3888} 3889 3890/// isVEXTRACTF128Index - Return true if the specified 3891/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3892/// suitable for input to VEXTRACTF128. 3893bool X86::isVEXTRACTF128Index(SDNode *N) { 3894 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3895 return false; 3896 3897 // The index should be aligned on a 128-bit boundary. 
3898 uint64_t Index = 3899 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3900 3901 unsigned VL = N->getValueType(0).getVectorNumElements(); 3902 unsigned VBits = N->getValueType(0).getSizeInBits(); 3903 unsigned ElSize = VBits / VL; 3904 bool Result = (Index * ElSize) % 128 == 0; 3905 3906 return Result; 3907} 3908 3909/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3910/// operand specifies a subvector insert that is suitable for input to 3911/// VINSERTF128. 3912bool X86::isVINSERTF128Index(SDNode *N) { 3913 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3914 return false; 3915 3916 // The index should be aligned on a 128-bit boundary. 3917 uint64_t Index = 3918 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3919 3920 unsigned VL = N->getValueType(0).getVectorNumElements(); 3921 unsigned VBits = N->getValueType(0).getSizeInBits(); 3922 unsigned ElSize = VBits / VL; 3923 bool Result = (Index * ElSize) % 128 == 0; 3924 3925 return Result; 3926} 3927 3928/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3929/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3930/// Handles 128-bit and 256-bit. 3931unsigned X86::getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 3932 EVT VT = N->getValueType(0); 3933 3934 assert((VT.is128BitVector() || VT.is256BitVector()) && 3935 "Unsupported vector type for PSHUF/SHUFP"); 3936 3937 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 3938 // independently on 128-bit lanes. 3939 unsigned NumElts = VT.getVectorNumElements(); 3940 unsigned NumLanes = VT.getSizeInBits()/128; 3941 unsigned NumLaneElts = NumElts/NumLanes; 3942 3943 assert((NumLaneElts == 2 || NumLaneElts == 4) && 3944 "Only supports 2 or 4 elements per lane"); 3945 3946 unsigned Shift = (NumLaneElts == 4) ? 1 : 0; 3947 unsigned Mask = 0; 3948 for (unsigned i = 0; i != NumElts; ++i) { 3949 int Elt = N->getMaskElt(i); 3950 if (Elt < 0) continue; 3951 Elt %= NumLaneElts; 3952 unsigned ShAmt = i << Shift; 3953 if (ShAmt >= 8) ShAmt -= 8; 3954 Mask |= Elt << ShAmt; 3955 } 3956 3957 return Mask; 3958} 3959 3960/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3961/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 3962unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 3963 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3964 unsigned Mask = 0; 3965 // 8 nodes, but we only care about the last 4. 3966 for (unsigned i = 7; i >= 4; --i) { 3967 int Val = SVOp->getMaskElt(i); 3968 if (Val >= 0) 3969 Mask |= (Val - 4); 3970 if (i != 4) 3971 Mask <<= 2; 3972 } 3973 return Mask; 3974} 3975 3976/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 3977/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 3978unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 3979 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3980 unsigned Mask = 0; 3981 // 8 nodes, but we only care about the first 4. 3982 for (int i = 3; i >= 0; --i) { 3983 int Val = SVOp->getMaskElt(i); 3984 if (Val >= 0) 3985 Mask |= Val; 3986 if (i != 0) 3987 Mask <<= 2; 3988 } 3989 return Mask; 3990} 3991 3992/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 3993/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 
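// The PSHUFD/SHUFP immediate computed above packs four 2-bit source indices
// into one byte, element 0 in the low bits. A standalone sketch with a plain
// int mask, where undef elements simply leave their 2-bit field as zero
// (illustrative names only):
#include <cassert>

static unsigned sketchPSHUFDImmediate(const int Mask[4]) {
  unsigned Imm = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Elt = Mask[i];
    if (Elt < 0)
      continue;                           // undef: contribute nothing
    Imm |= unsigned(Elt & 0x3) << (i * 2);
  }
  return Imm;
}

static void sketchPSHUFDImmediateExample() {
  const int Reverse[4] = { 3, 2, 1, 0 };          // reverse the four elements
  assert(sketchPSHUFDImmediate(Reverse) == 0x1B); // 0b00011011
  const int Splat0[4] = { 0, 0, 0, 0 };           // broadcast element 0
  assert(sketchPSHUFDImmediate(Splat0) == 0x00);
}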
3994static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 3995 EVT VT = SVOp->getValueType(0); 3996 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 3997 3998 unsigned NumElts = VT.getVectorNumElements(); 3999 unsigned NumLanes = VT.getSizeInBits()/128; 4000 unsigned NumLaneElts = NumElts/NumLanes; 4001 4002 int Val = 0; 4003 unsigned i; 4004 for (i = 0; i != NumElts; ++i) { 4005 Val = SVOp->getMaskElt(i); 4006 if (Val >= 0) 4007 break; 4008 } 4009 if (Val >= (int)NumElts) 4010 Val -= NumElts - NumLaneElts; 4011 4012 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4013 return (Val - i) * EltSize; 4014} 4015 4016/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4017/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4018/// instructions. 4019unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4020 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4021 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4022 4023 uint64_t Index = 4024 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4025 4026 EVT VecVT = N->getOperand(0).getValueType(); 4027 EVT ElVT = VecVT.getVectorElementType(); 4028 4029 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4030 return Index / NumElemsPerChunk; 4031} 4032 4033/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4034/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4035/// instructions. 4036unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4037 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4038 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4039 4040 uint64_t Index = 4041 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4042 4043 EVT VecVT = N->getValueType(0); 4044 EVT ElVT = VecVT.getVectorElementType(); 4045 4046 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4047 return Index / NumElemsPerChunk; 4048} 4049 4050/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4051/// constant +0.0. 4052bool X86::isZeroNode(SDValue Elt) { 4053 return ((isa<ConstantSDNode>(Elt) && 4054 cast<ConstantSDNode>(Elt)->isNullValue()) || 4055 (isa<ConstantFPSDNode>(Elt) && 4056 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4057} 4058 4059/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4060/// their permute mask. 4061static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4062 SelectionDAG &DAG) { 4063 EVT VT = SVOp->getValueType(0); 4064 unsigned NumElems = VT.getVectorNumElements(); 4065 SmallVector<int, 8> MaskVec; 4066 4067 for (unsigned i = 0; i != NumElems; ++i) { 4068 int idx = SVOp->getMaskElt(i); 4069 if (idx < 0) 4070 MaskVec.push_back(idx); 4071 else if (idx < (int)NumElems) 4072 MaskVec.push_back(idx + NumElems); 4073 else 4074 MaskVec.push_back(idx - NumElems); 4075 } 4076 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4077 SVOp->getOperand(0), &MaskVec[0]); 4078} 4079 4080/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4081/// match movhlps. The lower half elements should come from upper half of 4082/// V1 (and in order), and the upper half elements should come from the upper 4083/// half of V2 (and in order). 
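// The VEXTRACTF128/VINSERTF128 immediates above both reduce to "which 128-bit
// chunk does this element index fall in": Imm = Index / (128 / EltSizeInBits).
// A standalone sketch with plain integers (illustrative names only):
#include <cassert>

static unsigned sketchSubvector128Immediate(unsigned EltIndex,
                                            unsigned EltSizeInBits) {
  unsigned ElemsPerChunk = 128 / EltSizeInBits;
  return EltIndex / ElemsPerChunk;
}

static void sketchSubvector128ImmediateExamples() {
  // v8f32: elements 0-3 live in the low 128 bits, 4-7 in the high 128 bits.
  assert(sketchSubvector128Immediate(4, 32) == 1);
  // v4i64: elements 0-1 are chunk 0, elements 2-3 are chunk 1.
  assert(sketchSubvector128Immediate(1, 64) == 0);
}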
4084static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) { 4085 EVT VT = Op->getValueType(0); 4086 if (VT.getSizeInBits() != 128) 4087 return false; 4088 if (VT.getVectorNumElements() != 4) 4089 return false; 4090 for (unsigned i = 0, e = 2; i != e; ++i) 4091 if (!isUndefOrEqual(Op->getMaskElt(i), i+2)) 4092 return false; 4093 for (unsigned i = 2; i != 4; ++i) 4094 if (!isUndefOrEqual(Op->getMaskElt(i), i+4)) 4095 return false; 4096 return true; 4097} 4098 4099/// isScalarLoadToVector - Returns true if the node is a scalar load that 4100/// is promoted to a vector. It also returns the LoadSDNode by reference if 4101/// required. 4102static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4103 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4104 return false; 4105 N = N->getOperand(0).getNode(); 4106 if (!ISD::isNON_EXTLoad(N)) 4107 return false; 4108 if (LD) 4109 *LD = cast<LoadSDNode>(N); 4110 return true; 4111} 4112 4113// Test whether the given value is a vector value which will be legalized 4114// into a load. 4115static bool WillBeConstantPoolLoad(SDNode *N) { 4116 if (N->getOpcode() != ISD::BUILD_VECTOR) 4117 return false; 4118 4119 // Check for any non-constant elements. 4120 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4121 switch (N->getOperand(i).getNode()->getOpcode()) { 4122 case ISD::UNDEF: 4123 case ISD::ConstantFP: 4124 case ISD::Constant: 4125 break; 4126 default: 4127 return false; 4128 } 4129 4130 // Vectors of all-zeros and all-ones are materialized with special 4131 // instructions rather than being loaded. 4132 return !ISD::isBuildVectorAllZeros(N) && 4133 !ISD::isBuildVectorAllOnes(N); 4134} 4135 4136/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4137/// match movlp{s|d}. The lower half elements should come from lower half of 4138/// V1 (and in order), and the upper half elements should come from the upper 4139/// half of V2 (and in order). And since V1 will become the source of the 4140/// MOVLP, it must be either a vector load or a scalar load to vector. 4141static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4142 ShuffleVectorSDNode *Op) { 4143 EVT VT = Op->getValueType(0); 4144 if (VT.getSizeInBits() != 128) 4145 return false; 4146 4147 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4148 return false; 4149 // Is V2 is a vector load, don't do this transformation. We will try to use 4150 // load folding shufps op. 4151 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4152 return false; 4153 4154 unsigned NumElems = VT.getVectorNumElements(); 4155 4156 if (NumElems != 2 && NumElems != 4) 4157 return false; 4158 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4159 if (!isUndefOrEqual(Op->getMaskElt(i), i)) 4160 return false; 4161 for (unsigned i = NumElems/2; i != NumElems; ++i) 4162 if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems)) 4163 return false; 4164 return true; 4165} 4166 4167/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4168/// all the same. 4169static bool isSplatVector(SDNode *N) { 4170 if (N->getOpcode() != ISD::BUILD_VECTOR) 4171 return false; 4172 4173 SDValue SplatValue = N->getOperand(0); 4174 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4175 if (N->getOperand(i) != SplatValue) 4176 return false; 4177 return true; 4178} 4179 4180/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4181/// to an zero vector. 
4182/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4183static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4184 SDValue V1 = N->getOperand(0); 4185 SDValue V2 = N->getOperand(1); 4186 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4187 for (unsigned i = 0; i != NumElems; ++i) { 4188 int Idx = N->getMaskElt(i); 4189 if (Idx >= (int)NumElems) { 4190 unsigned Opc = V2.getOpcode(); 4191 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4192 continue; 4193 if (Opc != ISD::BUILD_VECTOR || 4194 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4195 return false; 4196 } else if (Idx >= 0) { 4197 unsigned Opc = V1.getOpcode(); 4198 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4199 continue; 4200 if (Opc != ISD::BUILD_VECTOR || 4201 !X86::isZeroNode(V1.getOperand(Idx))) 4202 return false; 4203 } 4204 } 4205 return true; 4206} 4207 4208/// getZeroVector - Returns a vector of specified type with all zero elements. 4209/// 4210static SDValue getZeroVector(EVT VT, bool HasSSE2, bool HasAVX2, 4211 SelectionDAG &DAG, DebugLoc dl) { 4212 assert(VT.isVector() && "Expected a vector type"); 4213 4214 // Always build SSE zero vectors as <4 x i32> bitcasted 4215 // to their dest type. This ensures they get CSE'd. 4216 SDValue Vec; 4217 if (VT.getSizeInBits() == 128) { // SSE 4218 if (HasSSE2) { // SSE2 4219 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4220 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4221 } else { // SSE1 4222 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4223 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4224 } 4225 } else if (VT.getSizeInBits() == 256) { // AVX 4226 if (HasAVX2) { // AVX2 4227 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4228 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4229 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4230 } else { 4231 // 256-bit logic and arithmetic instructions in AVX are all 4232 // floating-point, no support for integer ops. Emit fp zeroed vectors. 4233 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4234 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4235 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 4236 } 4237 } 4238 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4239} 4240 4241/// getOnesVector - Returns a vector of specified type with all bits set. 4242/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4243/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4244/// Then bitcast to their original type, ensuring they get CSE'd. 
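// Without AVX2, the 256-bit all-ones vector described above is assembled from
// one <4 x i32> of ~0U inserted into both 128-bit halves of the wider vector.
// A standalone model of that element layout using plain arrays instead of DAG
// nodes (illustrative names only):
#include <cassert>
#include <cstdint>

static void sketchOnesVector256Layout() {
  uint32_t Half[4] = { ~0u, ~0u, ~0u, ~0u };  // the <4 x i32> ones vector
  uint32_t Wide[8] = { 0 };                   // stands in for the v8i32 result
  for (int i = 0; i != 4; ++i) {
    Wide[i] = Half[i];        // insert into the low 128 bits
    Wide[i + 4] = Half[i];    // and again into the high 128 bits
  }
  for (int i = 0; i != 8; ++i)
    assert(Wide[i] == 0xFFFFFFFFu);           // every lane ends up all-ones
}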
4245static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, 4246 DebugLoc dl) { 4247 assert(VT.isVector() && "Expected a vector type"); 4248 assert((VT.is128BitVector() || VT.is256BitVector()) 4249 && "Expected a 128-bit or 256-bit vector type"); 4250 4251 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4252 SDValue Vec; 4253 if (VT.getSizeInBits() == 256) { 4254 if (HasAVX2) { // AVX2 4255 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4256 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4257 } else { // AVX 4258 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4259 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32), 4260 Vec, DAG.getConstant(0, MVT::i32), DAG, dl); 4261 Vec = Insert128BitVector(InsV, Vec, 4262 DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl); 4263 } 4264 } else { 4265 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4266 } 4267 4268 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4269} 4270 4271/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4272/// that point to V2 points to its first element. 4273static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 4274 EVT VT = SVOp->getValueType(0); 4275 unsigned NumElems = VT.getVectorNumElements(); 4276 4277 bool Changed = false; 4278 SmallVector<int, 8> MaskVec(SVOp->getMask().begin(), SVOp->getMask().end()); 4279 4280 for (unsigned i = 0; i != NumElems; ++i) { 4281 if (MaskVec[i] > (int)NumElems) { 4282 MaskVec[i] = NumElems; 4283 Changed = true; 4284 } 4285 } 4286 if (Changed) 4287 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0), 4288 SVOp->getOperand(1), &MaskVec[0]); 4289 return SDValue(SVOp, 0); 4290} 4291 4292/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4293/// operation of specified width. 4294static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4295 SDValue V2) { 4296 unsigned NumElems = VT.getVectorNumElements(); 4297 SmallVector<int, 8> Mask; 4298 Mask.push_back(NumElems); 4299 for (unsigned i = 1; i != NumElems; ++i) 4300 Mask.push_back(i); 4301 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4302} 4303 4304/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4305static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4306 SDValue V2) { 4307 unsigned NumElems = VT.getVectorNumElements(); 4308 SmallVector<int, 8> Mask; 4309 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4310 Mask.push_back(i); 4311 Mask.push_back(i + NumElems); 4312 } 4313 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4314} 4315 4316/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4317static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4318 SDValue V2) { 4319 unsigned NumElems = VT.getVectorNumElements(); 4320 unsigned Half = NumElems/2; 4321 SmallVector<int, 8> Mask; 4322 for (unsigned i = 0; i != Half; ++i) { 4323 Mask.push_back(i + Half); 4324 Mask.push_back(i + NumElems + Half); 4325 } 4326 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4327} 4328 4329// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4330// a generic shuffle instruction because the target has no such instructions. 4331// Generate shuffles which repeat i16 and i8 several times until they can be 4332// represented by v4f32 and then be manipulated by target suported shuffles. 
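// The splat-promotion loop described above is driven by the masks built by
// getUnpackl/getUnpackh: unpackl interleaves the low halves of the two
// operands, unpackh the high halves. A standalone sketch of the unpackl mask
// for 8 elements, with plain std::vector masks (illustrative names only):
#include <cassert>
#include <vector>

static std::vector<int> sketchUnpackLoMask(int NumElems) {
  std::vector<int> Mask;
  for (int i = 0; i != NumElems / 2; ++i) {
    Mask.push_back(i);             // element i of V1
    Mask.push_back(i + NumElems);  // element i of V2
  }
  return Mask;
}

static void sketchUnpackMaskExample() {
  // For v8i16, unpackl(V, V) duplicates each of elements 0..3 of V.
  std::vector<int> M = sketchUnpackLoMask(8);
  const int Expected[8] = { 0, 8, 1, 9, 2, 10, 3, 11 };
  for (int i = 0; i != 8; ++i)
    assert(M[i] == Expected[i]);
}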
4333static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4334 EVT VT = V.getValueType(); 4335 int NumElems = VT.getVectorNumElements(); 4336 DebugLoc dl = V.getDebugLoc(); 4337 4338 while (NumElems > 4) { 4339 if (EltNo < NumElems/2) { 4340 V = getUnpackl(DAG, dl, VT, V, V); 4341 } else { 4342 V = getUnpackh(DAG, dl, VT, V, V); 4343 EltNo -= NumElems/2; 4344 } 4345 NumElems >>= 1; 4346 } 4347 return V; 4348} 4349 4350/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4351static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4352 EVT VT = V.getValueType(); 4353 DebugLoc dl = V.getDebugLoc(); 4354 assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256) 4355 && "Vector size not supported"); 4356 4357 if (VT.getSizeInBits() == 128) { 4358 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4359 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4360 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4361 &SplatMask[0]); 4362 } else { 4363 // To use VPERMILPS to splat scalars, the second half of indicies must 4364 // refer to the higher part, which is a duplication of the lower one, 4365 // because VPERMILPS can only handle in-lane permutations. 4366 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4367 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4368 4369 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4370 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4371 &SplatMask[0]); 4372 } 4373 4374 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4375} 4376 4377/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4378static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4379 EVT SrcVT = SV->getValueType(0); 4380 SDValue V1 = SV->getOperand(0); 4381 DebugLoc dl = SV->getDebugLoc(); 4382 4383 int EltNo = SV->getSplatIndex(); 4384 int NumElems = SrcVT.getVectorNumElements(); 4385 unsigned Size = SrcVT.getSizeInBits(); 4386 4387 assert(((Size == 128 && NumElems > 4) || Size == 256) && 4388 "Unknown how to promote splat for type"); 4389 4390 // Extract the 128-bit part containing the splat element and update 4391 // the splat element index when it refers to the higher register. 4392 if (Size == 256) { 4393 unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0; 4394 V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl); 4395 if (Idx > 0) 4396 EltNo -= NumElems/2; 4397 } 4398 4399 // All i16 and i8 vector types can't be used directly by a generic shuffle 4400 // instruction because the target has no such instruction. Generate shuffles 4401 // which repeat i16 and i8 several times until they fit in i32, and then can 4402 // be manipulated by target suported shuffles. 4403 EVT EltVT = SrcVT.getVectorElementType(); 4404 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4405 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4406 4407 // Recreate the 256-bit vector and place the same 128-bit vector 4408 // into the low and high part. This is necessary because we want 4409 // to use VPERM* to shuffle the vectors 4410 if (Size == 256) { 4411 SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1, 4412 DAG.getConstant(0, MVT::i32), DAG, dl); 4413 V1 = Insert128BitVector(InsV, V1, 4414 DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); 4415 } 4416 4417 return getLegalSplat(DAG, V1, EltNo); 4418} 4419 4420/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4421/// vector of zero or undef vector. 
This produces a shuffle where the low 4422/// element of V2 is swizzled into the zero/undef vector, landing at element 4423/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 4424static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4425 bool IsZero, 4426 const X86Subtarget *Subtarget, 4427 SelectionDAG &DAG) { 4428 EVT VT = V2.getValueType(); 4429 SDValue V1 = IsZero 4430 ? getZeroVector(VT, Subtarget->hasSSE2(), Subtarget->hasAVX2(), DAG, 4431 V2.getDebugLoc()) : DAG.getUNDEF(VT); 4432 unsigned NumElems = VT.getVectorNumElements(); 4433 SmallVector<int, 16> MaskVec; 4434 for (unsigned i = 0; i != NumElems; ++i) 4435 // If this is the insertion idx, put the low elt of V2 here. 4436 MaskVec.push_back(i == Idx ? NumElems : i); 4437 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 4438} 4439 4440/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4441/// element of the result of the vector shuffle. 4442static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, 4443 unsigned Depth) { 4444 if (Depth == 6) 4445 return SDValue(); // Limit search depth. 4446 4447 SDValue V = SDValue(N, 0); 4448 EVT VT = V.getValueType(); 4449 unsigned Opcode = V.getOpcode(); 4450 4451 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 4452 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 4453 Index = SV->getMaskElt(Index); 4454 4455 if (Index < 0) 4456 return DAG.getUNDEF(VT.getVectorElementType()); 4457 4458 int NumElems = VT.getVectorNumElements(); 4459 SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1); 4460 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); 4461 } 4462 4463 // Recurse into target specific vector shuffles to find scalars. 4464 if (isTargetShuffle(Opcode)) { 4465 int NumElems = VT.getVectorNumElements(); 4466 SmallVector<unsigned, 16> ShuffleMask; 4467 SDValue ImmN; 4468 4469 switch(Opcode) { 4470 case X86ISD::SHUFP: 4471 ImmN = N->getOperand(N->getNumOperands()-1); 4472 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4473 ShuffleMask); 4474 break; 4475 case X86ISD::UNPCKH: 4476 DecodeUNPCKHMask(VT, ShuffleMask); 4477 break; 4478 case X86ISD::UNPCKL: 4479 DecodeUNPCKLMask(VT, ShuffleMask); 4480 break; 4481 case X86ISD::MOVHLPS: 4482 DecodeMOVHLPSMask(NumElems, ShuffleMask); 4483 break; 4484 case X86ISD::MOVLHPS: 4485 DecodeMOVLHPSMask(NumElems, ShuffleMask); 4486 break; 4487 case X86ISD::PSHUFD: 4488 ImmN = N->getOperand(N->getNumOperands()-1); 4489 DecodePSHUFMask(NumElems, 4490 cast<ConstantSDNode>(ImmN)->getZExtValue(), 4491 ShuffleMask); 4492 break; 4493 case X86ISD::PSHUFHW: 4494 ImmN = N->getOperand(N->getNumOperands()-1); 4495 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4496 ShuffleMask); 4497 break; 4498 case X86ISD::PSHUFLW: 4499 ImmN = N->getOperand(N->getNumOperands()-1); 4500 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4501 ShuffleMask); 4502 break; 4503 case X86ISD::MOVSS: 4504 case X86ISD::MOVSD: { 4505 // The index 0 always comes from the first element of the second source, 4506 // this is why MOVSS and MOVSD are used in the first place. The other 4507 // elements come from the other positions of the first source vector. 4508 unsigned OpNum = (Index == 0) ? 
1 : 0; 4509 return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG, 4510 Depth+1); 4511 } 4512 case X86ISD::VPERMILP: 4513 ImmN = N->getOperand(N->getNumOperands()-1); 4514 DecodeVPERMILPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4515 ShuffleMask); 4516 break; 4517 case X86ISD::VPERM2X128: 4518 ImmN = N->getOperand(N->getNumOperands()-1); 4519 DecodeVPERM2F128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4520 ShuffleMask); 4521 break; 4522 case X86ISD::MOVDDUP: 4523 case X86ISD::MOVLHPD: 4524 case X86ISD::MOVLPD: 4525 case X86ISD::MOVLPS: 4526 case X86ISD::MOVSHDUP: 4527 case X86ISD::MOVSLDUP: 4528 case X86ISD::PALIGN: 4529 return SDValue(); // Not yet implemented. 4530 default: 4531 assert(0 && "unknown target shuffle node"); 4532 return SDValue(); 4533 } 4534 4535 Index = ShuffleMask[Index]; 4536 if (Index < 0) 4537 return DAG.getUNDEF(VT.getVectorElementType()); 4538 4539 SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); 4540 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, 4541 Depth+1); 4542 } 4543 4544 // Actual nodes that may contain scalar elements 4545 if (Opcode == ISD::BITCAST) { 4546 V = V.getOperand(0); 4547 EVT SrcVT = V.getValueType(); 4548 unsigned NumElems = VT.getVectorNumElements(); 4549 4550 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 4551 return SDValue(); 4552 } 4553 4554 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 4555 return (Index == 0) ? V.getOperand(0) 4556 : DAG.getUNDEF(VT.getVectorElementType()); 4557 4558 if (V.getOpcode() == ISD::BUILD_VECTOR) 4559 return V.getOperand(Index); 4560 4561 return SDValue(); 4562} 4563 4564/// getNumOfConsecutiveZeros - Return the number of elements of a vector 4565/// shuffle operation which come from a consecutively from a zero. The 4566/// search can start in two different directions, from left or right. 4567static 4568unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, 4569 bool ZerosFromLeft, SelectionDAG &DAG) { 4570 int i = 0; 4571 4572 while (i < NumElems) { 4573 unsigned Index = ZerosFromLeft ? i : NumElems-i-1; 4574 SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0); 4575 if (!(Elt.getNode() && 4576 (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) 4577 break; 4578 ++i; 4579 } 4580 4581 return i; 4582} 4583 4584/// isShuffleMaskConsecutive - Check if the shuffle mask indicies from MaskI to 4585/// MaskE correspond consecutively to elements from one of the vector operands, 4586/// starting from its index OpIdx. Also tell OpNum which source vector operand. 4587static 4588bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE, 4589 int OpIdx, int NumElems, unsigned &OpNum) { 4590 bool SeenV1 = false; 4591 bool SeenV2 = false; 4592 4593 for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) { 4594 int Idx = SVOp->getMaskElt(i); 4595 // Ignore undef indicies 4596 if (Idx < 0) 4597 continue; 4598 4599 if (Idx < NumElems) 4600 SeenV1 = true; 4601 else 4602 SeenV2 = true; 4603 4604 // Only accept consecutive elements from the same vector 4605 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 4606 return false; 4607 } 4608 4609 OpNum = SeenV1 ? 0 : 1; 4610 return true; 4611} 4612 4613/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 4614/// logical left shift of a vector. 
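/// For illustration: shuffle V1, zero, <1, 2, 3, 4> yields {V1[1], V1[2],
/// V1[3], 0}, i.e. the contents of V1 moved towards element 0 with a zero
/// shifted in at the top, which the caller can emit as a VSRLDQ byte shift.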
4615static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4616 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4617 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4618 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4619 false /* check zeros from right */, DAG); 4620 unsigned OpSrc; 4621 4622 if (!NumZeros) 4623 return false; 4624 4625 // Considering the elements in the mask that are not consecutive zeros, 4626 // check if they consecutively come from only one of the source vectors. 4627 // 4628 // V1 = {X, A, B, C} 0 4629 // \ \ \ / 4630 // vector_shuffle V1, V2 <1, 2, 3, X> 4631 // 4632 if (!isShuffleMaskConsecutive(SVOp, 4633 0, // Mask Start Index 4634 NumElems-NumZeros-1, // Mask End Index 4635 NumZeros, // Where to start looking in the src vector 4636 NumElems, // Number of elements in vector 4637 OpSrc)) // Which source operand ? 4638 return false; 4639 4640 isLeft = false; 4641 ShAmt = NumZeros; 4642 ShVal = SVOp->getOperand(OpSrc); 4643 return true; 4644} 4645 4646/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4647/// logical left shift of a vector. 4648static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4649 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4650 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4651 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4652 true /* check zeros from left */, DAG); 4653 unsigned OpSrc; 4654 4655 if (!NumZeros) 4656 return false; 4657 4658 // Considering the elements in the mask that are not consecutive zeros, 4659 // check if they consecutively come from only one of the source vectors. 4660 // 4661 // 0 { A, B, X, X } = V2 4662 // / \ / / 4663 // vector_shuffle V1, V2 <X, X, 4, 5> 4664 // 4665 if (!isShuffleMaskConsecutive(SVOp, 4666 NumZeros, // Mask Start Index 4667 NumElems-1, // Mask End Index 4668 0, // Where to start looking in the src vector 4669 NumElems, // Number of elements in vector 4670 OpSrc)) // Which source operand ? 4671 return false; 4672 4673 isLeft = true; 4674 ShAmt = NumZeros; 4675 ShVal = SVOp->getOperand(OpSrc); 4676 return true; 4677} 4678 4679/// isVectorShift - Returns true if the shuffle can be implemented as a 4680/// logical left or right shift of a vector. 4681static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4682 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4683 // Although the logic below support any bitwidth size, there are no 4684 // shift instructions which handle more than 128-bit vectors. 4685 if (SVOp->getValueType(0).getSizeInBits() > 128) 4686 return false; 4687 4688 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4689 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4690 return true; 4691 4692 return false; 4693} 4694 4695/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
4696/// 4697static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4698 unsigned NumNonZero, unsigned NumZero, 4699 SelectionDAG &DAG, 4700 const TargetLowering &TLI) { 4701 if (NumNonZero > 8) 4702 return SDValue(); 4703 4704 DebugLoc dl = Op.getDebugLoc(); 4705 SDValue V(0, 0); 4706 bool First = true; 4707 for (unsigned i = 0; i < 16; ++i) { 4708 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4709 if (ThisIsNonZero && First) { 4710 if (NumZero) 4711 V = getZeroVector(MVT::v8i16, /*HasSSE2*/ true, /*HasAVX2*/ false, 4712 DAG, dl); 4713 else 4714 V = DAG.getUNDEF(MVT::v8i16); 4715 First = false; 4716 } 4717 4718 if ((i & 1) != 0) { 4719 SDValue ThisElt(0, 0), LastElt(0, 0); 4720 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4721 if (LastIsNonZero) { 4722 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4723 MVT::i16, Op.getOperand(i-1)); 4724 } 4725 if (ThisIsNonZero) { 4726 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4727 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4728 ThisElt, DAG.getConstant(8, MVT::i8)); 4729 if (LastIsNonZero) 4730 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4731 } else 4732 ThisElt = LastElt; 4733 4734 if (ThisElt.getNode()) 4735 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4736 DAG.getIntPtrConstant(i/2)); 4737 } 4738 } 4739 4740 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4741} 4742 4743/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4744/// 4745static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4746 unsigned NumNonZero, unsigned NumZero, 4747 SelectionDAG &DAG, 4748 const TargetLowering &TLI) { 4749 if (NumNonZero > 4) 4750 return SDValue(); 4751 4752 DebugLoc dl = Op.getDebugLoc(); 4753 SDValue V(0, 0); 4754 bool First = true; 4755 for (unsigned i = 0; i < 8; ++i) { 4756 bool isNonZero = (NonZeros & (1 << i)) != 0; 4757 if (isNonZero) { 4758 if (First) { 4759 if (NumZero) 4760 V = getZeroVector(MVT::v8i16, /*HasSSE2*/ true, /*HasAVX2*/ false, 4761 DAG, dl); 4762 else 4763 V = DAG.getUNDEF(MVT::v8i16); 4764 First = false; 4765 } 4766 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4767 MVT::v8i16, V, Op.getOperand(i), 4768 DAG.getIntPtrConstant(i)); 4769 } 4770 } 4771 4772 return V; 4773} 4774 4775/// getVShift - Return a vector logical shift node. 4776/// 4777static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4778 unsigned NumBits, SelectionDAG &DAG, 4779 const TargetLowering &TLI, DebugLoc dl) { 4780 assert(VT.getSizeInBits() == 128 && "Unknown type for VShift"); 4781 EVT ShVT = MVT::v2i64; 4782 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4783 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4784 return DAG.getNode(ISD::BITCAST, dl, VT, 4785 DAG.getNode(Opc, dl, ShVT, SrcOp, 4786 DAG.getConstant(NumBits, 4787 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4788} 4789 4790SDValue 4791X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4792 SelectionDAG &DAG) const { 4793 4794 // Check if the scalar load can be widened into a vector load. And if 4795 // the address is "base + cst" see if the cst can be "absorbed" into 4796 // the shuffle mask. 
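  // For example: a 4-byte load from <frame index> + 8 feeding a v4f32 splat
  // can be widened to a 16-byte load from the frame index itself and then
  // splatted with the mask <2, 2, 2, 2>, provided the stack object can be
  // given the required 16-byte alignment.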
4797 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4798 SDValue Ptr = LD->getBasePtr(); 4799 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4800 return SDValue(); 4801 EVT PVT = LD->getValueType(0); 4802 if (PVT != MVT::i32 && PVT != MVT::f32) 4803 return SDValue(); 4804 4805 int FI = -1; 4806 int64_t Offset = 0; 4807 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4808 FI = FINode->getIndex(); 4809 Offset = 0; 4810 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4811 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4812 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4813 Offset = Ptr.getConstantOperandVal(1); 4814 Ptr = Ptr.getOperand(0); 4815 } else { 4816 return SDValue(); 4817 } 4818 4819 // FIXME: 256-bit vector instructions don't require a strict alignment, 4820 // improve this code to support it better. 4821 unsigned RequiredAlign = VT.getSizeInBits()/8; 4822 SDValue Chain = LD->getChain(); 4823 // Make sure the stack object alignment is at least 16 or 32. 4824 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4825 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4826 if (MFI->isFixedObjectIndex(FI)) { 4827 // Can't change the alignment. FIXME: It's possible to compute 4828 // the exact stack offset and reference FI + adjust offset instead. 4829 // If someone *really* cares about this. That's the way to implement it. 4830 return SDValue(); 4831 } else { 4832 MFI->setObjectAlignment(FI, RequiredAlign); 4833 } 4834 } 4835 4836 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4837 // Ptr + (Offset & ~15). 4838 if (Offset < 0) 4839 return SDValue(); 4840 if ((Offset % RequiredAlign) & 3) 4841 return SDValue(); 4842 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4843 if (StartOffset) 4844 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4845 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4846 4847 int EltNo = (Offset - StartOffset) >> 2; 4848 int NumElems = VT.getVectorNumElements(); 4849 4850 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4851 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4852 LD->getPointerInfo().getWithOffset(StartOffset), 4853 false, false, false, 0); 4854 4855 SmallVector<int, 8> Mask; 4856 for (int i = 0; i < NumElems; ++i) 4857 Mask.push_back(EltNo); 4858 4859 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 4860 } 4861 4862 return SDValue(); 4863} 4864 4865/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4866/// vector of type 'VT', see if the elements can be replaced by a single large 4867/// load which has the same value as a build_vector whose operands are 'elts'. 4868/// 4869/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4870/// 4871/// FIXME: we'd also like to handle the case where the last elements are zero 4872/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4873/// There's even a handy isZeroNode for that purpose. 4874static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4875 DebugLoc &DL, SelectionDAG &DAG) { 4876 EVT EltVT = VT.getVectorElementType(); 4877 unsigned NumElems = Elts.size(); 4878 4879 LoadSDNode *LDBase = NULL; 4880 unsigned LastLoadedElt = -1U; 4881 4882 // For each element in the initializer, see if we've found a load or an undef. 4883 // If we don't find an initial load element, or later load elements are 4884 // non-consecutive, bail out. 
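  // For example: <load a, load a+4, load a+8, load a+12> with 4-byte elements
  // becomes a single wide load of 'a' covering the whole vector.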
4885 for (unsigned i = 0; i < NumElems; ++i) { 4886 SDValue Elt = Elts[i]; 4887 4888 if (!Elt.getNode() || 4889 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4890 return SDValue(); 4891 if (!LDBase) { 4892 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4893 return SDValue(); 4894 LDBase = cast<LoadSDNode>(Elt.getNode()); 4895 LastLoadedElt = i; 4896 continue; 4897 } 4898 if (Elt.getOpcode() == ISD::UNDEF) 4899 continue; 4900 4901 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4902 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4903 return SDValue(); 4904 LastLoadedElt = i; 4905 } 4906 4907 // If we have found an entire vector of loads and undefs, then return a large 4908 // load of the entire vector width starting at the base pointer. If we found 4909 // consecutive loads for the low half, generate a vzext_load node. 4910 if (LastLoadedElt == NumElems - 1) { 4911 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4912 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4913 LDBase->getPointerInfo(), 4914 LDBase->isVolatile(), LDBase->isNonTemporal(), 4915 LDBase->isInvariant(), 0); 4916 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4917 LDBase->getPointerInfo(), 4918 LDBase->isVolatile(), LDBase->isNonTemporal(), 4919 LDBase->isInvariant(), LDBase->getAlignment()); 4920 } else if (NumElems == 4 && LastLoadedElt == 1 && 4921 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 4922 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4923 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4924 SDValue ResNode = 4925 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 4926 LDBase->getPointerInfo(), 4927 LDBase->getAlignment(), 4928 false/*isVolatile*/, true/*ReadMem*/, 4929 false/*WriteMem*/); 4930 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 4931 } 4932 return SDValue(); 4933} 4934 4935/// isVectorBroadcast - Check if the node chain is suitable to be xformed to 4936/// a vbroadcast node. We support two patterns: 4937/// 1. A splat BUILD_VECTOR which uses a single scalar load. 4938/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 4939/// a scalar load. 4940/// The scalar load node is returned when a pattern is found, 4941/// or SDValue() otherwise. 4942static SDValue isVectorBroadcast(SDValue &Op, const X86Subtarget *Subtarget) { 4943 if (!Subtarget->hasAVX()) 4944 return SDValue(); 4945 4946 EVT VT = Op.getValueType(); 4947 SDValue V = Op; 4948 4949 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 4950 V = V.getOperand(0); 4951 4952 //A suspected load to be broadcasted. 4953 SDValue Ld; 4954 4955 switch (V.getOpcode()) { 4956 default: 4957 // Unknown pattern found. 4958 return SDValue(); 4959 4960 case ISD::BUILD_VECTOR: { 4961 // The BUILD_VECTOR node must be a splat. 4962 if (!isSplatVector(V.getNode())) 4963 return SDValue(); 4964 4965 Ld = V.getOperand(0); 4966 4967 // The suspected load node has several users. Make sure that all 4968 // of its users are from the BUILD_VECTOR node. 4969 if (!Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 4970 return SDValue(); 4971 break; 4972 } 4973 4974 case ISD::VECTOR_SHUFFLE: { 4975 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 4976 4977 // Shuffles must have a splat mask where the first element is 4978 // broadcasted. 
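    // For example: vector_shuffle <0,0,0,0> of
    //   (scalar_to_vector (load addr)), undef
    // is the kind of pattern that can be turned into a vbroadcast of 'addr'.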
4979 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 4980 return SDValue(); 4981 4982 SDValue Sc = Op.getOperand(0); 4983 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR) 4984 return SDValue(); 4985 4986 Ld = Sc.getOperand(0); 4987 4988 // The scalar_to_vector node and the suspected 4989 // load node must have exactly one user. 4990 if (!Sc.hasOneUse() || !Ld.hasOneUse()) 4991 return SDValue(); 4992 break; 4993 } 4994 } 4995 4996 // The scalar source must be a normal load. 4997 if (!ISD::isNormalLoad(Ld.getNode())) 4998 return SDValue(); 4999 5000 bool Is256 = VT.getSizeInBits() == 256; 5001 bool Is128 = VT.getSizeInBits() == 128; 5002 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5003 5004 // VBroadcast to YMM 5005 if (Is256 && (ScalarSize == 32 || ScalarSize == 64)) 5006 return Ld; 5007 5008 // VBroadcast to XMM 5009 if (Is128 && (ScalarSize == 32)) 5010 return Ld; 5011 5012 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5013 // double since there is vbroadcastsd xmm 5014 if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { 5015 // VBroadcast to YMM 5016 if (Is256 && (ScalarSize == 8 || ScalarSize == 16)) 5017 return Ld; 5018 5019 // VBroadcast to XMM 5020 if (Is128 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) 5021 return Ld; 5022 } 5023 5024 // Unsupported broadcast. 5025 return SDValue(); 5026} 5027 5028SDValue 5029X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5030 DebugLoc dl = Op.getDebugLoc(); 5031 5032 EVT VT = Op.getValueType(); 5033 EVT ExtVT = VT.getVectorElementType(); 5034 unsigned NumElems = Op.getNumOperands(); 5035 5036 // Vectors containing all zeros can be matched by pxor and xorps later 5037 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5038 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5039 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 5040 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5041 return Op; 5042 5043 return getZeroVector(VT, Subtarget->hasSSE2(), 5044 Subtarget->hasAVX2(), DAG, dl); 5045 } 5046 5047 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5048 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5049 // vpcmpeqd on 256-bit vectors. 5050 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5051 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2())) 5052 return Op; 5053 5054 return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl); 5055 } 5056 5057 SDValue LD = isVectorBroadcast(Op, Subtarget); 5058 if (LD.getNode()) 5059 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, LD); 5060 5061 unsigned EVTBits = ExtVT.getSizeInBits(); 5062 5063 unsigned NumZero = 0; 5064 unsigned NumNonZero = 0; 5065 unsigned NonZeros = 0; 5066 bool IsAllConstants = true; 5067 SmallSet<SDValue, 8> Values; 5068 for (unsigned i = 0; i < NumElems; ++i) { 5069 SDValue Elt = Op.getOperand(i); 5070 if (Elt.getOpcode() == ISD::UNDEF) 5071 continue; 5072 Values.insert(Elt); 5073 if (Elt.getOpcode() != ISD::Constant && 5074 Elt.getOpcode() != ISD::ConstantFP) 5075 IsAllConstants = false; 5076 if (X86::isZeroNode(Elt)) 5077 NumZero++; 5078 else { 5079 NonZeros |= (1 << i); 5080 NumNonZero++; 5081 } 5082 } 5083 5084 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5085 if (NumNonZero == 0) 5086 return DAG.getUNDEF(VT); 5087 5088 // Special case for single non-zero, non-undef, element. 
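  // For example: build_vector (x, 0, 0, 0) or (0, 0, y, undef) reaches here
  // with NumNonZero == 1; such cases are typically lowered as a
  // scalar_to_vector plus a shuffle rather than a constant-pool load.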
5089 if (NumNonZero == 1) { 5090 unsigned Idx = CountTrailingZeros_32(NonZeros); 5091 SDValue Item = Op.getOperand(Idx); 5092 5093 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5094 // the value are obviously zero, truncate the value to i32 and do the 5095 // insertion that way. Only do this if the value is non-constant or if the 5096 // value is a constant being inserted into element 0. It is cheaper to do 5097 // a constant pool load than it is to do a movd + shuffle. 5098 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5099 (!IsAllConstants || Idx == 0)) { 5100 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5101 // Handle SSE only. 5102 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5103 EVT VecVT = MVT::v4i32; 5104 unsigned VecElts = 4; 5105 5106 // Truncate the value (which may itself be a constant) to i32, and 5107 // convert it to a vector with movd (S2V+shuffle to zero extend). 5108 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5109 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5110 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5111 5112 // Now we have our 32-bit value zero extended in the low element of 5113 // a vector. If Idx != 0, swizzle it into place. 5114 if (Idx != 0) { 5115 SmallVector<int, 4> Mask; 5116 Mask.push_back(Idx); 5117 for (unsigned i = 1; i != VecElts; ++i) 5118 Mask.push_back(i); 5119 Item = DAG.getVectorShuffle(VecVT, dl, Item, 5120 DAG.getUNDEF(Item.getValueType()), 5121 &Mask[0]); 5122 } 5123 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5124 } 5125 } 5126 5127 // If we have a constant or non-constant insertion into the low element of 5128 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5129 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5130 // depending on what the source datatype is. 5131 if (Idx == 0) { 5132 if (NumZero == 0) 5133 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5134 5135 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5136 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5137 if (VT.getSizeInBits() == 256) { 5138 SDValue ZeroVec = getZeroVector(VT, Subtarget->hasSSE2(), 5139 Subtarget->hasAVX2(), DAG, dl); 5140 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5141 Item, DAG.getIntPtrConstant(0)); 5142 } 5143 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 5144 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5145 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5146 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5147 } 5148 5149 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5150 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5151 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5152 if (VT.getSizeInBits() == 256) { 5153 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget->hasSSE2(), 5154 Subtarget->hasAVX2(), DAG, dl); 5155 Item = Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32), 5156 DAG, dl); 5157 } else { 5158 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 5159 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5160 } 5161 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5162 } 5163 } 5164 5165 // Is it a vector logical left shift? 
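    // For example: build_vector (0, x) of type v2i64 matches here and is
    // emitted as a 64-bit VSHLDQ of (scalar_to_vector x).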
5166 if (NumElems == 2 && Idx == 1 && 5167 X86::isZeroNode(Op.getOperand(0)) && 5168 !X86::isZeroNode(Op.getOperand(1))) { 5169 unsigned NumBits = VT.getSizeInBits(); 5170 return getVShift(true, VT, 5171 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5172 VT, Op.getOperand(1)), 5173 NumBits/2, DAG, *this, dl); 5174 } 5175 5176 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5177 return SDValue(); 5178 5179 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5180 // is a non-constant being inserted into an element other than the low one, 5181 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5182 // movd/movss) to move this into the low element, then shuffle it into 5183 // place. 5184 if (EVTBits == 32) { 5185 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5186 5187 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5188 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5189 SmallVector<int, 8> MaskVec; 5190 for (unsigned i = 0; i < NumElems; i++) 5191 MaskVec.push_back(i == Idx ? 0 : 1); 5192 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5193 } 5194 } 5195 5196 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5197 if (Values.size() == 1) { 5198 if (EVTBits == 32) { 5199 // Instead of a shuffle like this: 5200 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5201 // Check if it's possible to issue this instead. 5202 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5203 unsigned Idx = CountTrailingZeros_32(NonZeros); 5204 SDValue Item = Op.getOperand(Idx); 5205 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5206 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5207 } 5208 return SDValue(); 5209 } 5210 5211 // A vector full of immediates; various special cases are already 5212 // handled, so this is best done with a single constant-pool load. 5213 if (IsAllConstants) 5214 return SDValue(); 5215 5216 // For AVX-length vectors, build the individual 128-bit pieces and use 5217 // shuffles to put them in place. 5218 if (VT.getSizeInBits() == 256 && !ISD::isBuildVectorAllZeros(Op.getNode())) { 5219 SmallVector<SDValue, 32> V; 5220 for (unsigned i = 0; i < NumElems; ++i) 5221 V.push_back(Op.getOperand(i)); 5222 5223 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5224 5225 // Build both the lower and upper subvector. 5226 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5227 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5228 NumElems/2); 5229 5230 // Recreate the wider vector with the lower and upper part. 5231 SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower, 5232 DAG.getConstant(0, MVT::i32), DAG, dl); 5233 return Insert128BitVector(Vec, Upper, DAG.getConstant(NumElems/2, MVT::i32), 5234 DAG, dl); 5235 } 5236 5237 // Let legalizer expand 2-wide build_vectors. 5238 if (EVTBits == 64) { 5239 if (NumNonZero == 1) { 5240 // One half is zero or undef. 5241 unsigned Idx = CountTrailingZeros_32(NonZeros); 5242 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5243 Op.getOperand(Idx)); 5244 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5245 } 5246 return SDValue(); 5247 } 5248 5249 // If element VT is < 32 bits, convert it to inserts into a zero vector. 
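  // For example: a v16i8 build_vector with a handful of non-zero bytes is
  // assembled below as i16 insertions into a zero (or undef) v8i16 and then
  // bitcast back to v16i8.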
5250 if (EVTBits == 8 && NumElems == 16) { 5251 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5252 *this); 5253 if (V.getNode()) return V; 5254 } 5255 5256 if (EVTBits == 16 && NumElems == 8) { 5257 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5258 *this); 5259 if (V.getNode()) return V; 5260 } 5261 5262 // If element VT is == 32 bits, turn it into a number of shuffles. 5263 SmallVector<SDValue, 8> V(NumElems); 5264 if (NumElems == 4 && NumZero > 0) { 5265 for (unsigned i = 0; i < 4; ++i) { 5266 bool isZero = !(NonZeros & (1 << i)); 5267 if (isZero) 5268 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), Subtarget->hasAVX2(), 5269 DAG, dl); 5270 else 5271 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5272 } 5273 5274 for (unsigned i = 0; i < 2; ++i) { 5275 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5276 default: break; 5277 case 0: 5278 V[i] = V[i*2]; // Must be a zero vector. 5279 break; 5280 case 1: 5281 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5282 break; 5283 case 2: 5284 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5285 break; 5286 case 3: 5287 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5288 break; 5289 } 5290 } 5291 5292 bool Reverse1 = (NonZeros & 0x3) == 2; 5293 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5294 int MaskVec[] = { 5295 Reverse1 ? 1 : 0, 5296 Reverse1 ? 0 : 1, 5297 static_cast<int>(Reverse2 ? 1-NumElems : NumElems), 5298 static_cast<int>(Reverse2 ? NumElems : 1+NumElems) 5299 }; 5300 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5301 } 5302 5303 if (Values.size() > 1 && VT.getSizeInBits() == 128) { 5304 // Check for a build vector of consecutive loads. 5305 for (unsigned i = 0; i < NumElems; ++i) 5306 V[i] = Op.getOperand(i); 5307 5308 // Check for elements which are consecutive loads. 5309 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5310 if (LD.getNode()) 5311 return LD; 5312 5313 // For SSE 4.1, use insertps to put the high elements into the low element. 5314 if (getSubtarget()->hasSSE41()) { 5315 SDValue Result; 5316 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5317 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5318 else 5319 Result = DAG.getUNDEF(VT); 5320 5321 for (unsigned i = 1; i < NumElems; ++i) { 5322 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5323 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5324 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5325 } 5326 return Result; 5327 } 5328 5329 // Otherwise, expand into a number of unpckl*, start by extending each of 5330 // our (non-undef) elements to the full vector width with the element in the 5331 // bottom slot of the vector (which generates no code for SSE). 5332 for (unsigned i = 0; i < NumElems; ++i) { 5333 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5334 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5335 else 5336 V[i] = DAG.getUNDEF(VT); 5337 } 5338 5339 // Next, we iteratively mix elements, e.g. 
for v4f32: 5340 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5341 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5342 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5343 unsigned EltStride = NumElems >> 1; 5344 while (EltStride != 0) { 5345 for (unsigned i = 0; i < EltStride; ++i) { 5346 // If V[i+EltStride] is undef and this is the first round of mixing, 5347 // then it is safe to just drop this shuffle: V[i] is already in the 5348 // right place, the one element (since it's the first round) being 5349 // inserted as undef can be dropped. This isn't safe for successive 5350 // rounds because they will permute elements within both vectors. 5351 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5352 EltStride == NumElems/2) 5353 continue; 5354 5355 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5356 } 5357 EltStride >>= 1; 5358 } 5359 return V[0]; 5360 } 5361 return SDValue(); 5362} 5363 5364// LowerMMXCONCAT_VECTORS - We support concatenate two MMX registers and place 5365// them in a MMX register. This is better than doing a stack convert. 5366static SDValue LowerMMXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5367 DebugLoc dl = Op.getDebugLoc(); 5368 EVT ResVT = Op.getValueType(); 5369 5370 assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 || 5371 ResVT == MVT::v8i16 || ResVT == MVT::v16i8); 5372 int Mask[2]; 5373 SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0)); 5374 SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 5375 InVec = Op.getOperand(1); 5376 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5377 unsigned NumElts = ResVT.getVectorNumElements(); 5378 VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 5379 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp, 5380 InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1)); 5381 } else { 5382 InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec); 5383 SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 5384 Mask[0] = 0; Mask[1] = 2; 5385 VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask); 5386 } 5387 return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 5388} 5389 5390// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5391// to create 256-bit vectors from two other 128-bit ones. 5392static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5393 DebugLoc dl = Op.getDebugLoc(); 5394 EVT ResVT = Op.getValueType(); 5395 5396 assert(ResVT.getSizeInBits() == 256 && "Value type must be 256-bit wide"); 5397 5398 SDValue V1 = Op.getOperand(0); 5399 SDValue V2 = Op.getOperand(1); 5400 unsigned NumElems = ResVT.getVectorNumElements(); 5401 5402 SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, ResVT), V1, 5403 DAG.getConstant(0, MVT::i32), DAG, dl); 5404 return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), 5405 DAG, dl); 5406} 5407 5408SDValue 5409X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { 5410 EVT ResVT = Op.getValueType(); 5411 5412 assert(Op.getNumOperands() == 2); 5413 assert((ResVT.getSizeInBits() == 128 || ResVT.getSizeInBits() == 256) && 5414 "Unsupported CONCAT_VECTORS for value type"); 5415 5416 // We support concatenate two MMX registers and place them in a MMX register. 5417 // This is better than doing a stack convert. 5418 if (ResVT.is128BitVector()) 5419 return LowerMMXCONCAT_VECTORS(Op, DAG); 5420 5421 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5422 // from two other 128-bit ones. 
5423 return LowerAVXCONCAT_VECTORS(Op, DAG); 5424} 5425 5426// v8i16 shuffles - Prefer shuffles in the following order: 5427// 1. [all] pshuflw, pshufhw, optional move 5428// 2. [ssse3] 1 x pshufb 5429// 3. [ssse3] 2 x pshufb + 1 x por 5430// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5431SDValue 5432X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, 5433 SelectionDAG &DAG) const { 5434 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5435 SDValue V1 = SVOp->getOperand(0); 5436 SDValue V2 = SVOp->getOperand(1); 5437 DebugLoc dl = SVOp->getDebugLoc(); 5438 SmallVector<int, 8> MaskVals; 5439 5440 // Determine if more than 1 of the words in each of the low and high quadwords 5441 // of the result come from the same quadword of one of the two inputs. Undef 5442 // mask values count as coming from any quadword, for better codegen. 5443 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5444 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5445 BitVector InputQuads(4); 5446 for (unsigned i = 0; i < 8; ++i) { 5447 unsigned *Quad = i < 4 ? LoQuad : HiQuad; 5448 int EltIdx = SVOp->getMaskElt(i); 5449 MaskVals.push_back(EltIdx); 5450 if (EltIdx < 0) { 5451 ++Quad[0]; 5452 ++Quad[1]; 5453 ++Quad[2]; 5454 ++Quad[3]; 5455 continue; 5456 } 5457 ++Quad[EltIdx / 4]; 5458 InputQuads.set(EltIdx / 4); 5459 } 5460 5461 int BestLoQuad = -1; 5462 unsigned MaxQuad = 1; 5463 for (unsigned i = 0; i < 4; ++i) { 5464 if (LoQuad[i] > MaxQuad) { 5465 BestLoQuad = i; 5466 MaxQuad = LoQuad[i]; 5467 } 5468 } 5469 5470 int BestHiQuad = -1; 5471 MaxQuad = 1; 5472 for (unsigned i = 0; i < 4; ++i) { 5473 if (HiQuad[i] > MaxQuad) { 5474 BestHiQuad = i; 5475 MaxQuad = HiQuad[i]; 5476 } 5477 } 5478 5479 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5480 // of the two input vectors, shuffle them into one input vector so only a 5481 // single pshufb instruction is necessary. If There are more than 2 input 5482 // quads, disable the next transformation since it does not help SSSE3. 5483 bool V1Used = InputQuads[0] || InputQuads[1]; 5484 bool V2Used = InputQuads[2] || InputQuads[3]; 5485 if (Subtarget->hasSSSE3()) { 5486 if (InputQuads.count() == 2 && V1Used && V2Used) { 5487 BestLoQuad = InputQuads.find_first(); 5488 BestHiQuad = InputQuads.find_next(BestLoQuad); 5489 } 5490 if (InputQuads.count() > 2) { 5491 BestLoQuad = -1; 5492 BestHiQuad = -1; 5493 } 5494 } 5495 5496 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5497 // the shuffle mask. If a quad is scored as -1, that means that it contains 5498 // words from all 4 input quadwords. 5499 SDValue NewV; 5500 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5501 int MaskV[] = { 5502 BestLoQuad < 0 ? 0 : BestLoQuad, 5503 BestHiQuad < 0 ? 1 : BestHiQuad 5504 }; 5505 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5506 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5507 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5508 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5509 5510 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5511 // source words for the shuffle, to aid later transformations. 
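    // For illustration: with BestLoQuad == 2 and BestHiQuad == 0, a mask
    // value of 9 (word 1 of quad 2) is rewritten to 1 and a mask value of 3
    // (word 3 of quad 0) is rewritten to 7, so MaskVals indexes NewV only.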
5512 bool AllWordsInNewV = true; 5513 bool InOrder[2] = { true, true }; 5514 for (unsigned i = 0; i != 8; ++i) { 5515 int idx = MaskVals[i]; 5516 if (idx != (int)i) 5517 InOrder[i/4] = false; 5518 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5519 continue; 5520 AllWordsInNewV = false; 5521 break; 5522 } 5523 5524 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5525 if (AllWordsInNewV) { 5526 for (int i = 0; i != 8; ++i) { 5527 int idx = MaskVals[i]; 5528 if (idx < 0) 5529 continue; 5530 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5531 if ((idx != i) && idx < 4) 5532 pshufhw = false; 5533 if ((idx != i) && idx > 3) 5534 pshuflw = false; 5535 } 5536 V1 = NewV; 5537 V2Used = false; 5538 BestLoQuad = 0; 5539 BestHiQuad = 1; 5540 } 5541 5542 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5543 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5544 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5545 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5546 unsigned TargetMask = 0; 5547 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5548 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5549 TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()): 5550 X86::getShufflePSHUFLWImmediate(NewV.getNode()); 5551 V1 = NewV.getOperand(0); 5552 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5553 } 5554 } 5555 5556 // If we have SSSE3, and all words of the result are from 1 input vector, 5557 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5558 // is present, fall back to case 4. 5559 if (Subtarget->hasSSSE3()) { 5560 SmallVector<SDValue,16> pshufbMask; 5561 5562 // If we have elements from both input vectors, set the high bit of the 5563 // shuffle mask element to zero out elements that come from V2 in the V1 5564 // mask, and elements that come from V1 in the V2 mask, so that the two 5565 // results can be OR'd together. 5566 bool TwoInputs = V1Used && V2Used; 5567 for (unsigned i = 0; i != 8; ++i) { 5568 int EltIdx = MaskVals[i] * 2; 5569 if (TwoInputs && (EltIdx >= 16)) { 5570 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5571 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5572 continue; 5573 } 5574 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5575 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); 5576 } 5577 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5578 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5579 DAG.getNode(ISD::BUILD_VECTOR, dl, 5580 MVT::v16i8, &pshufbMask[0], 16)); 5581 if (!TwoInputs) 5582 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5583 5584 // Calculate the shuffle mask for the second input, shuffle it, and 5585 // OR it with the first shuffled input. 
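    // For illustration: a mask value of 9 (word 1 of V2) contributes byte
    // indices 2 and 3 to V2's pshufb mask, i.e. (9*2)-16 and (9*2)-15.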
5586 pshufbMask.clear(); 5587 for (unsigned i = 0; i != 8; ++i) { 5588 int EltIdx = MaskVals[i] * 2; 5589 if (EltIdx < 16) { 5590 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5591 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5592 continue; 5593 } 5594 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 5595 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); 5596 } 5597 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5598 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5599 DAG.getNode(ISD::BUILD_VECTOR, dl, 5600 MVT::v16i8, &pshufbMask[0], 16)); 5601 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5602 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5603 } 5604 5605 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5606 // and update MaskVals with new element order. 5607 std::bitset<8> InOrder; 5608 if (BestLoQuad >= 0) { 5609 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5610 for (int i = 0; i != 4; ++i) { 5611 int idx = MaskVals[i]; 5612 if (idx < 0) { 5613 InOrder.set(i); 5614 } else if ((idx / 4) == BestLoQuad) { 5615 MaskV[i] = idx & 3; 5616 InOrder.set(i); 5617 } 5618 } 5619 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5620 &MaskV[0]); 5621 5622 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 5623 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5624 NewV.getOperand(0), 5625 X86::getShufflePSHUFLWImmediate(NewV.getNode()), 5626 DAG); 5627 } 5628 5629 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5630 // and update MaskVals with the new element order. 5631 if (BestHiQuad >= 0) { 5632 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5633 for (unsigned i = 4; i != 8; ++i) { 5634 int idx = MaskVals[i]; 5635 if (idx < 0) { 5636 InOrder.set(i); 5637 } else if ((idx / 4) == BestHiQuad) { 5638 MaskV[i] = (idx & 3) + 4; 5639 InOrder.set(i); 5640 } 5641 } 5642 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5643 &MaskV[0]); 5644 5645 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 5646 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5647 NewV.getOperand(0), 5648 X86::getShufflePSHUFHWImmediate(NewV.getNode()), 5649 DAG); 5650 } 5651 5652 // In case BestHi & BestLo were both -1, which means each quadword has a word 5653 // from each of the four input quadwords, calculate the InOrder bitvector now 5654 // before falling through to the insert/extract cleanup. 5655 if (BestLoQuad == -1 && BestHiQuad == -1) { 5656 NewV = V1; 5657 for (int i = 0; i != 8; ++i) 5658 if (MaskVals[i] < 0 || MaskVals[i] == i) 5659 InOrder.set(i); 5660 } 5661 5662 // The other elements are put in the right place using pextrw and pinsrw. 5663 for (unsigned i = 0; i != 8; ++i) { 5664 if (InOrder[i]) 5665 continue; 5666 int EltIdx = MaskVals[i]; 5667 if (EltIdx < 0) 5668 continue; 5669 SDValue ExtOp = (EltIdx < 8) 5670 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5671 DAG.getIntPtrConstant(EltIdx)) 5672 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5673 DAG.getIntPtrConstant(EltIdx - 8)); 5674 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5675 DAG.getIntPtrConstant(i)); 5676 } 5677 return NewV; 5678} 5679 5680// v16i8 shuffles - Prefer shuffles in the following order: 5681// 1. [ssse3] 1 x pshufb 5682// 2. [ssse3] 2 x pshufb + 1 x por 5683// 3. 
[all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5684static 5685SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5686 SelectionDAG &DAG, 5687 const X86TargetLowering &TLI) { 5688 SDValue V1 = SVOp->getOperand(0); 5689 SDValue V2 = SVOp->getOperand(1); 5690 DebugLoc dl = SVOp->getDebugLoc(); 5691 ArrayRef<int> MaskVals = SVOp->getMask(); 5692 5693 // If we have SSSE3, case 1 is generated when all result bytes come from 5694 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5695 // present, fall back to case 3. 5696 // FIXME: kill V2Only once shuffles are canonizalized by getNode. 5697 bool V1Only = true; 5698 bool V2Only = true; 5699 for (unsigned i = 0; i < 16; ++i) { 5700 int EltIdx = MaskVals[i]; 5701 if (EltIdx < 0) 5702 continue; 5703 if (EltIdx < 16) 5704 V2Only = false; 5705 else 5706 V1Only = false; 5707 } 5708 5709 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5710 if (TLI.getSubtarget()->hasSSSE3()) { 5711 SmallVector<SDValue,16> pshufbMask; 5712 5713 // If all result elements are from one input vector, then only translate 5714 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5715 // 5716 // Otherwise, we have elements from both input vectors, and must zero out 5717 // elements that come from V2 in the first mask, and V1 in the second mask 5718 // so that we can OR them together. 5719 bool TwoInputs = !(V1Only || V2Only); 5720 for (unsigned i = 0; i != 16; ++i) { 5721 int EltIdx = MaskVals[i]; 5722 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) { 5723 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5724 continue; 5725 } 5726 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5727 } 5728 // If all the elements are from V2, assign it to V1 and return after 5729 // building the first pshufb. 5730 if (V2Only) 5731 V1 = V2; 5732 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5733 DAG.getNode(ISD::BUILD_VECTOR, dl, 5734 MVT::v16i8, &pshufbMask[0], 16)); 5735 if (!TwoInputs) 5736 return V1; 5737 5738 // Calculate the shuffle mask for the second input, shuffle it, and 5739 // OR it with the first shuffled input. 5740 pshufbMask.clear(); 5741 for (unsigned i = 0; i != 16; ++i) { 5742 int EltIdx = MaskVals[i]; 5743 if (EltIdx < 16) { 5744 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5745 continue; 5746 } 5747 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 5748 } 5749 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5750 DAG.getNode(ISD::BUILD_VECTOR, dl, 5751 MVT::v16i8, &pshufbMask[0], 16)); 5752 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5753 } 5754 5755 // No SSSE3 - Calculate in place words and then fix all out of place words 5756 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5757 // the 16 different words that comprise the two doublequadword input vectors. 5758 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5759 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5760 SDValue NewV = V2Only ? V2 : V1; 5761 for (int i = 0; i != 8; ++i) { 5762 int Elt0 = MaskVals[i*2]; 5763 int Elt1 = MaskVals[i*2+1]; 5764 5765 // This word of the result is all undef, skip it. 5766 if (Elt0 < 0 && Elt1 < 0) 5767 continue; 5768 5769 // This word of the result is already in the correct place, skip it. 5770 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1)) 5771 continue; 5772 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17)) 5773 continue; 5774 5775 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 5776 SDValue Elt1Src = Elt1 < 16 ? 
V1 : V2; 5777 SDValue InsElt; 5778 5779 // If Elt0 and Elt1 are defined, are consecutive, and can be load 5780 // using a single extract together, load it and store it. 5781 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 5782 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5783 DAG.getIntPtrConstant(Elt1 / 2)); 5784 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5785 DAG.getIntPtrConstant(i)); 5786 continue; 5787 } 5788 5789 // If Elt1 is defined, extract it from the appropriate source. If the 5790 // source byte is not also odd, shift the extracted word left 8 bits 5791 // otherwise clear the bottom 8 bits if we need to do an or. 5792 if (Elt1 >= 0) { 5793 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5794 DAG.getIntPtrConstant(Elt1 / 2)); 5795 if ((Elt1 & 1) == 0) 5796 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 5797 DAG.getConstant(8, 5798 TLI.getShiftAmountTy(InsElt.getValueType()))); 5799 else if (Elt0 >= 0) 5800 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 5801 DAG.getConstant(0xFF00, MVT::i16)); 5802 } 5803 // If Elt0 is defined, extract it from the appropriate source. If the 5804 // source byte is not also even, shift the extracted word right 8 bits. If 5805 // Elt1 was also defined, OR the extracted values together before 5806 // inserting them in the result. 5807 if (Elt0 >= 0) { 5808 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 5809 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 5810 if ((Elt0 & 1) != 0) 5811 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 5812 DAG.getConstant(8, 5813 TLI.getShiftAmountTy(InsElt0.getValueType()))); 5814 else if (Elt1 >= 0) 5815 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 5816 DAG.getConstant(0x00FF, MVT::i16)); 5817 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 5818 : InsElt0; 5819 } 5820 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5821 DAG.getIntPtrConstant(i)); 5822 } 5823 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 5824} 5825 5826/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 5827/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 5828/// done when every pair / quad of shuffle mask elements point to elements in 5829/// the right sequence. e.g. 5830/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 5831static 5832SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 5833 SelectionDAG &DAG, DebugLoc dl) { 5834 EVT VT = SVOp->getValueType(0); 5835 SDValue V1 = SVOp->getOperand(0); 5836 SDValue V2 = SVOp->getOperand(1); 5837 unsigned NumElems = VT.getVectorNumElements(); 5838 unsigned NewWidth = (NumElems == 4) ? 
2 : 4; 5839 EVT NewVT; 5840 switch (VT.getSimpleVT().SimpleTy) { 5841 default: assert(false && "Unexpected!"); 5842 case MVT::v4f32: NewVT = MVT::v2f64; break; 5843 case MVT::v4i32: NewVT = MVT::v2i64; break; 5844 case MVT::v8i16: NewVT = MVT::v4i32; break; 5845 case MVT::v16i8: NewVT = MVT::v4i32; break; 5846 } 5847 5848 int Scale = NumElems / NewWidth; 5849 SmallVector<int, 8> MaskVec; 5850 for (unsigned i = 0; i < NumElems; i += Scale) { 5851 int StartIdx = -1; 5852 for (int j = 0; j < Scale; ++j) { 5853 int EltIdx = SVOp->getMaskElt(i+j); 5854 if (EltIdx < 0) 5855 continue; 5856 if (StartIdx == -1) 5857 StartIdx = EltIdx - (EltIdx % Scale); 5858 if (EltIdx != StartIdx + j) 5859 return SDValue(); 5860 } 5861 if (StartIdx == -1) 5862 MaskVec.push_back(-1); 5863 else 5864 MaskVec.push_back(StartIdx / Scale); 5865 } 5866 5867 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1); 5868 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2); 5869 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 5870} 5871 5872/// getVZextMovL - Return a zero-extending vector move low node. 5873/// 5874static SDValue getVZextMovL(EVT VT, EVT OpVT, 5875 SDValue SrcOp, SelectionDAG &DAG, 5876 const X86Subtarget *Subtarget, DebugLoc dl) { 5877 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 5878 LoadSDNode *LD = NULL; 5879 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 5880 LD = dyn_cast<LoadSDNode>(SrcOp); 5881 if (!LD) { 5882 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 5883 // instead. 5884 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 5885 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 5886 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 5887 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 5888 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 5889 // PR2108 5890 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 5891 return DAG.getNode(ISD::BITCAST, dl, VT, 5892 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5893 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5894 OpVT, 5895 SrcOp.getOperand(0) 5896 .getOperand(0)))); 5897 } 5898 } 5899 } 5900 5901 return DAG.getNode(ISD::BITCAST, dl, VT, 5902 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5903 DAG.getNode(ISD::BITCAST, dl, 5904 OpVT, SrcOp))); 5905} 5906 5907/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 5908/// which could not be matched by any known target speficic shuffle 5909static SDValue 5910LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 5911 EVT VT = SVOp->getValueType(0); 5912 5913 unsigned NumElems = VT.getVectorNumElements(); 5914 unsigned NumLaneElems = NumElems / 2; 5915 5916 int MinRange[2][2] = { { static_cast<int>(NumElems), 5917 static_cast<int>(NumElems) }, 5918 { static_cast<int>(NumElems), 5919 static_cast<int>(NumElems) } }; 5920 int MaxRange[2][2] = { { -1, -1 }, { -1, -1 } }; 5921 5922 // Collect used ranges for each source in each lane 5923 for (unsigned l = 0; l < 2; ++l) { 5924 unsigned LaneStart = l*NumLaneElems; 5925 for (unsigned i = 0; i != NumLaneElems; ++i) { 5926 int Idx = SVOp->getMaskElt(i+LaneStart); 5927 if (Idx < 0) 5928 continue; 5929 5930 int Input = 0; 5931 if (Idx >= (int)NumElems) { 5932 Idx -= NumElems; 5933 Input = 1; 5934 } 5935 5936 if (Idx > MaxRange[l][Input]) 5937 MaxRange[l][Input] = Idx; 5938 if (Idx < MinRange[l][Input]) 5939 MinRange[l][Input] = Idx; 5940 } 5941 } 5942 5943 // Make sure each range is 128-bits 5944 int ExtractIdx[2][2] = { { -1, -1 }, { -1, -1 } }; 5945 for (unsigned l = 0; l < 2; ++l) { 5946 for (unsigned Input = 0; Input < 2; ++Input) { 5947 if (MinRange[l][Input] == (int)NumElems && MaxRange[l][Input] < 0) 5948 continue; 5949 5950 if (MinRange[l][Input] >= 0 && MaxRange[l][Input] < (int)NumLaneElems) 5951 ExtractIdx[l][Input] = 0; 5952 else if (MinRange[l][Input] >= (int)NumLaneElems && 5953 MaxRange[l][Input] < (int)NumElems) 5954 ExtractIdx[l][Input] = NumLaneElems; 5955 else 5956 return SDValue(); 5957 } 5958 } 5959 5960 DebugLoc dl = SVOp->getDebugLoc(); 5961 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 5962 EVT NVT = MVT::getVectorVT(EltVT, NumElems/2); 5963 5964 SDValue Ops[2][2]; 5965 for (unsigned l = 0; l < 2; ++l) { 5966 for (unsigned Input = 0; Input < 2; ++Input) { 5967 if (ExtractIdx[l][Input] >= 0) 5968 Ops[l][Input] = Extract128BitVector(SVOp->getOperand(Input), 5969 DAG.getConstant(ExtractIdx[l][Input], MVT::i32), 5970 DAG, dl); 5971 else 5972 Ops[l][Input] = DAG.getUNDEF(NVT); 5973 } 5974 } 5975 5976 // Generate 128-bit shuffles 5977 SmallVector<int, 16> Mask1, Mask2; 5978 for (unsigned i = 0; i != NumLaneElems; ++i) { 5979 int Elt = SVOp->getMaskElt(i); 5980 if (Elt >= (int)NumElems) { 5981 Elt %= NumLaneElems; 5982 Elt += NumLaneElems; 5983 } else if (Elt >= 0) { 5984 Elt %= NumLaneElems; 5985 } 5986 Mask1.push_back(Elt); 5987 } 5988 for (unsigned i = NumLaneElems; i != NumElems; ++i) { 5989 int Elt = SVOp->getMaskElt(i); 5990 if (Elt >= (int)NumElems) { 5991 Elt %= NumLaneElems; 5992 Elt += NumLaneElems; 5993 } else if (Elt >= 0) { 5994 Elt %= NumLaneElems; 5995 } 5996 Mask2.push_back(Elt); 5997 } 5998 5999 SDValue Shuf1 = DAG.getVectorShuffle(NVT, dl, Ops[0][0], Ops[0][1], &Mask1[0]); 6000 SDValue Shuf2 = DAG.getVectorShuffle(NVT, dl, Ops[1][0], Ops[1][1], &Mask2[0]); 6001 6002 // Concatenate the result back 6003 SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Shuf1, 6004 
DAG.getConstant(0, MVT::i32), DAG, dl); 6005 return Insert128BitVector(V, Shuf2, DAG.getConstant(NumElems/2, MVT::i32), 6006 DAG, dl); 6007} 6008 6009/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6010/// 4 elements, and match them with several different shuffle types. 6011static SDValue 6012LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6013 SDValue V1 = SVOp->getOperand(0); 6014 SDValue V2 = SVOp->getOperand(1); 6015 DebugLoc dl = SVOp->getDebugLoc(); 6016 EVT VT = SVOp->getValueType(0); 6017 6018 assert(VT.getSizeInBits() == 128 && "Unsupported vector size"); 6019 6020 std::pair<int, int> Locs[4]; 6021 int Mask1[] = { -1, -1, -1, -1 }; 6022 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end()); 6023 6024 unsigned NumHi = 0; 6025 unsigned NumLo = 0; 6026 for (unsigned i = 0; i != 4; ++i) { 6027 int Idx = PermMask[i]; 6028 if (Idx < 0) { 6029 Locs[i] = std::make_pair(-1, -1); 6030 } else { 6031 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6032 if (Idx < 4) { 6033 Locs[i] = std::make_pair(0, NumLo); 6034 Mask1[NumLo] = Idx; 6035 NumLo++; 6036 } else { 6037 Locs[i] = std::make_pair(1, NumHi); 6038 if (2+NumHi < 4) 6039 Mask1[2+NumHi] = Idx; 6040 NumHi++; 6041 } 6042 } 6043 } 6044 6045 if (NumLo <= 2 && NumHi <= 2) { 6046 // If no more than two elements come from either vector. This can be 6047 // implemented with two shuffles. First shuffle gather the elements. 6048 // The second shuffle, which takes the first shuffle as both of its 6049 // vector operands, put the elements into the right order. 6050 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6051 6052 int Mask2[] = { -1, -1, -1, -1 }; 6053 6054 for (unsigned i = 0; i != 4; ++i) 6055 if (Locs[i].first != -1) { 6056 unsigned Idx = (i < 2) ? 0 : 4; 6057 Idx += Locs[i].first * 2 + Locs[i].second; 6058 Mask2[i] = Idx; 6059 } 6060 6061 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6062 } else if (NumLo == 3 || NumHi == 3) { 6063 // Otherwise, we must have three elements from one vector, call it X, and 6064 // one element from the other, call it Y. First, use a shufps to build an 6065 // intermediate vector with the one element from Y and the element from X 6066 // that will be in the same half in the final destination (the indexes don't 6067 // matter). Then, use a shufps to build the final vector, taking the half 6068 // containing the element from Y from the intermediate, and the other half 6069 // from X. 6070 if (NumHi == 3) { 6071 // Normalize it so the 3 elements come from V1. 6072 CommuteVectorShuffleMask(PermMask, 4); 6073 std::swap(V1, V2); 6074 } 6075 6076 // Find the element from V2. 6077 unsigned HiIndex; 6078 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6079 int Val = PermMask[HiIndex]; 6080 if (Val < 0) 6081 continue; 6082 if (Val >= 4) 6083 break; 6084 } 6085 6086 Mask1[0] = PermMask[HiIndex]; 6087 Mask1[1] = -1; 6088 Mask1[2] = PermMask[HiIndex^1]; 6089 Mask1[3] = -1; 6090 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6091 6092 if (HiIndex >= 2) { 6093 Mask1[0] = PermMask[0]; 6094 Mask1[1] = PermMask[1]; 6095 Mask1[2] = HiIndex & 1 ? 6 : 4; 6096 Mask1[3] = HiIndex & 1 ? 4 : 6; 6097 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6098 } else { 6099 Mask1[0] = HiIndex & 1 ? 2 : 0; 6100 Mask1[1] = HiIndex & 1 ? 
0 : 2; 6101 Mask1[2] = PermMask[2]; 6102 Mask1[3] = PermMask[3]; 6103 if (Mask1[2] >= 0) 6104 Mask1[2] += 4; 6105 if (Mask1[3] >= 0) 6106 Mask1[3] += 4; 6107 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6108 } 6109 } 6110 6111 // Break it into (shuffle shuffle_hi, shuffle_lo). 6112 int LoMask[] = { -1, -1, -1, -1 }; 6113 int HiMask[] = { -1, -1, -1, -1 }; 6114 6115 int *MaskPtr = LoMask; 6116 unsigned MaskIdx = 0; 6117 unsigned LoIdx = 0; 6118 unsigned HiIdx = 2; 6119 for (unsigned i = 0; i != 4; ++i) { 6120 if (i == 2) { 6121 MaskPtr = HiMask; 6122 MaskIdx = 1; 6123 LoIdx = 0; 6124 HiIdx = 2; 6125 } 6126 int Idx = PermMask[i]; 6127 if (Idx < 0) { 6128 Locs[i] = std::make_pair(-1, -1); 6129 } else if (Idx < 4) { 6130 Locs[i] = std::make_pair(MaskIdx, LoIdx); 6131 MaskPtr[LoIdx] = Idx; 6132 LoIdx++; 6133 } else { 6134 Locs[i] = std::make_pair(MaskIdx, HiIdx); 6135 MaskPtr[HiIdx] = Idx; 6136 HiIdx++; 6137 } 6138 } 6139 6140 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 6141 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 6142 int MaskOps[] = { -1, -1, -1, -1 }; 6143 for (unsigned i = 0; i != 4; ++i) 6144 if (Locs[i].first != -1) 6145 MaskOps[i] = Locs[i].first * 4 + Locs[i].second; 6146 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 6147} 6148 6149static bool MayFoldVectorLoad(SDValue V) { 6150 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6151 V = V.getOperand(0); 6152 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6153 V = V.getOperand(0); 6154 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && 6155 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) 6156 // BUILD_VECTOR (load), undef 6157 V = V.getOperand(0); 6158 if (MayFoldLoad(V)) 6159 return true; 6160 return false; 6161} 6162 6163// FIXME: the version above should always be used. Since there's 6164// a bug where several vector shuffles can't be folded because the 6165// DAG is not updated during lowering and a node claims to have two 6166// uses while it only has one, use this version, and let isel match 6167// another instruction if the load really happens to have more than 6168// one use. Remove this version after this bug get fixed. 6169// rdar://8434668, PR8156 6170static bool RelaxedMayFoldVectorLoad(SDValue V) { 6171 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6172 V = V.getOperand(0); 6173 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6174 V = V.getOperand(0); 6175 if (ISD::isNormalLoad(V.getNode())) 6176 return true; 6177 return false; 6178} 6179 6180/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by 6181/// a vector extract, and if both can be later optimized into a single load. 6182/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked 6183/// here because otherwise a target specific shuffle node is going to be 6184/// emitted for this shuffle, and the optimization not done. 6185/// FIXME: This is probably not the best approach, but fix the problem 6186/// until the right path is decided. 
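///
/// For illustration only (hypothetical IR), the pattern being looked for is
/// roughly:
///   %s = shufflevector <4 x float> (load %addr), <4 x float> undef,
///                      <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
///   %f = extractelement <4 x float> %s, i32 0
/// which DAG combining can later turn into a single scalar load of the
/// second element at %addr.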
6187static 6188bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG, 6189 const TargetLowering &TLI) { 6190 EVT VT = V.getValueType(); 6191 ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V); 6192 6193 // Be sure that the vector shuffle is present in a pattern like this: 6194 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr) 6195 if (!V.hasOneUse()) 6196 return false; 6197 6198 SDNode *N = *V.getNode()->use_begin(); 6199 if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6200 return false; 6201 6202 SDValue EltNo = N->getOperand(1); 6203 if (!isa<ConstantSDNode>(EltNo)) 6204 return false; 6205 6206 // If the bit convert changed the number of elements, it is unsafe 6207 // to examine the mask. 6208 bool HasShuffleIntoBitcast = false; 6209 if (V.getOpcode() == ISD::BITCAST) { 6210 EVT SrcVT = V.getOperand(0).getValueType(); 6211 if (SrcVT.getVectorNumElements() != VT.getVectorNumElements()) 6212 return false; 6213 V = V.getOperand(0); 6214 HasShuffleIntoBitcast = true; 6215 } 6216 6217 // Select the input vector, guarding against out of range extract vector. 6218 unsigned NumElems = VT.getVectorNumElements(); 6219 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 6220 int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt); 6221 V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1); 6222 6223 // If we are accessing the upper part of a YMM register 6224 // then the EXTRACT_VECTOR_ELT is likely to be legalized to a sequence of 6225 // EXTRACT_SUBVECTOR + EXTRACT_VECTOR_ELT, which are not detected at this point 6226 // because the legalization of N did not happen yet. 6227 if (Idx >= (int)NumElems/2 && VT.getSizeInBits() == 256) 6228 return false; 6229 6230 // Skip one more bit_convert if necessary 6231 if (V.getOpcode() == ISD::BITCAST) 6232 V = V.getOperand(0); 6233 6234 if (!ISD::isNormalLoad(V.getNode())) 6235 return false; 6236 6237 // Is the original load suitable? 6238 LoadSDNode *LN0 = cast<LoadSDNode>(V); 6239 6240 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile()) 6241 return false; 6242 6243 if (!HasShuffleIntoBitcast) 6244 return true; 6245 6246 // If there's a bitcast before the shuffle, check if the load type and 6247 // alignment is valid. 6248 unsigned Align = LN0->getAlignment(); 6249 unsigned NewAlign = 6250 TLI.getTargetData()->getABITypeAlignment( 6251 VT.getTypeForEVT(*DAG.getContext())); 6252 6253 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 6254 return false; 6255 6256 return true; 6257} 6258 6259static 6260SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { 6261 EVT VT = Op.getValueType(); 6262 6263 // Canonizalize to v2f64. 
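  // (A sketch of why this is safe: MOVDDUP only exists for v2f64 and simply
  // duplicates the low 64-bit element, <A, B> -> <A, A>, so a v2i64 splat can
  // be bitcast to v2f64, duplicated, and bitcast back; the bitcasts are free
  // at the register level.)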
6264 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6265 return DAG.getNode(ISD::BITCAST, dl, VT, 6266 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6267 V1, DAG)); 6268} 6269 6270static 6271SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 6272 bool HasSSE2) { 6273 SDValue V1 = Op.getOperand(0); 6274 SDValue V2 = Op.getOperand(1); 6275 EVT VT = Op.getValueType(); 6276 6277 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6278 6279 if (HasSSE2 && VT == MVT::v2f64) 6280 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6281 6282 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6283 return DAG.getNode(ISD::BITCAST, dl, VT, 6284 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6285 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6286 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6287} 6288 6289static 6290SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 6291 SDValue V1 = Op.getOperand(0); 6292 SDValue V2 = Op.getOperand(1); 6293 EVT VT = Op.getValueType(); 6294 6295 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 6296 "unsupported shuffle type"); 6297 6298 if (V2.getOpcode() == ISD::UNDEF) 6299 V2 = V1; 6300 6301 // v4i32 or v4f32 6302 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 6303} 6304 6305static 6306SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 6307 SDValue V1 = Op.getOperand(0); 6308 SDValue V2 = Op.getOperand(1); 6309 EVT VT = Op.getValueType(); 6310 unsigned NumElems = VT.getVectorNumElements(); 6311 6312 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 6313 // operand of these instructions is only memory, so check if there's a 6314 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 6315 // same masks. 6316 bool CanFoldLoad = false; 6317 6318 // Trivial case, when V2 comes from a load. 6319 if (MayFoldVectorLoad(V2)) 6320 CanFoldLoad = true; 6321 6322 // When V1 is a load, it can be folded later into a store in isel, example: 6323 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 6324 // turns into: 6325 // (MOVLPSmr addr:$src1, VR128:$src2) 6326 // So, recognize this potential and also use MOVLPS or MOVLPD 6327 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 6328 CanFoldLoad = true; 6329 6330 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6331 if (CanFoldLoad) { 6332 if (HasSSE2 && NumElems == 2) 6333 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 6334 6335 if (NumElems == 4) 6336 // If we don't care about the second element, procede to use movss. 6337 if (SVOp->getMaskElt(1) != -1) 6338 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 6339 } 6340 6341 // movl and movlp will both match v2i64, but v2i64 is never matched by 6342 // movl earlier because we make it strict to avoid messing with the movlp load 6343 // folding logic (see the code above getMOVLP call). Match it here then, 6344 // this is horrible, but will stay like this until we move all shuffle 6345 // matching to x86 specific nodes. Note that for the 1st condition all 6346 // types are matched with movsd. 
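  // For example (v4i32, illustrative only): a mask of <4, 1, 2, 3> takes
  // element 0 from V2 and elements 1-3 from V1, which is exactly what
  // "movss %xmm2, %xmm1" does, so it is emitted as (MOVSS V1, V2); the
  // two-element counterpart <2, 1> maps onto movsd.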
6347 if (HasSSE2) { 6348 // FIXME: isMOVLMask should be checked and matched before getMOVLP, 6349 // as to remove this logic from here, as much as possible 6350 if (NumElems == 2 || !X86::isMOVLMask(SVOp)) 6351 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6352 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6353 } 6354 6355 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 6356 6357 // Invert the operand order and use SHUFPS to match it. 6358 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1, 6359 X86::getShuffleSHUFImmediate(SVOp), DAG); 6360} 6361 6362static 6363SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG, 6364 const TargetLowering &TLI, 6365 const X86Subtarget *Subtarget) { 6366 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6367 EVT VT = Op.getValueType(); 6368 DebugLoc dl = Op.getDebugLoc(); 6369 SDValue V1 = Op.getOperand(0); 6370 SDValue V2 = Op.getOperand(1); 6371 6372 if (isZeroShuffle(SVOp)) 6373 return getZeroVector(VT, Subtarget->hasSSE2(), Subtarget->hasAVX2(), 6374 DAG, dl); 6375 6376 // Handle splat operations 6377 if (SVOp->isSplat()) { 6378 unsigned NumElem = VT.getVectorNumElements(); 6379 int Size = VT.getSizeInBits(); 6380 // Special case, this is the only place now where it's allowed to return 6381 // a vector_shuffle operation without using a target specific node, because 6382 // *hopefully* it will be optimized away by the dag combiner. FIXME: should 6383 // this be moved to DAGCombine instead? 6384 if (NumElem <= 4 && CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI)) 6385 return Op; 6386 6387 // Use vbroadcast whenever the splat comes from a foldable load 6388 SDValue LD = isVectorBroadcast(Op, Subtarget); 6389 if (LD.getNode()) 6390 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, LD); 6391 6392 // Handle splats by matching through known shuffle masks 6393 if ((Size == 128 && NumElem <= 4) || 6394 (Size == 256 && NumElem < 8)) 6395 return SDValue(); 6396 6397 // All remaning splats are promoted to target supported vector shuffles. 6398 return PromoteSplat(SVOp, DAG); 6399 } 6400 6401 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6402 // do it! 6403 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 6404 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6405 if (NewOp.getNode()) 6406 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6407 } else if ((VT == MVT::v4i32 || 6408 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6409 // FIXME: Figure out a cleaner way to do this. 6410 // Try to make use of movq to zero out the top part. 
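  // (For illustration: "movq %xmm0, %xmm0" copies the low quadword and zeroes
  // bits 127:64, so e.g. a v4i32 shuffle of V1 with an all-zeros V2 and mask
  // <0, 1, 4, 5>, once narrowed to v2i64, is just a VZEXT_MOVL of V1.)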
6411 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6412 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6413 if (NewOp.getNode()) { 6414 if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false)) 6415 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0), 6416 DAG, Subtarget, dl); 6417 } 6418 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6419 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6420 if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp))) 6421 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1), 6422 DAG, Subtarget, dl); 6423 } 6424 } 6425 return SDValue(); 6426} 6427 6428SDValue 6429X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6430 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6431 SDValue V1 = Op.getOperand(0); 6432 SDValue V2 = Op.getOperand(1); 6433 EVT VT = Op.getValueType(); 6434 DebugLoc dl = Op.getDebugLoc(); 6435 unsigned NumElems = VT.getVectorNumElements(); 6436 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 6437 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6438 bool V1IsSplat = false; 6439 bool V2IsSplat = false; 6440 bool HasSSE2 = Subtarget->hasSSE2(); 6441 bool HasAVX = Subtarget->hasAVX(); 6442 bool HasAVX2 = Subtarget->hasAVX2(); 6443 MachineFunction &MF = DAG.getMachineFunction(); 6444 bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); 6445 6446 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6447 6448 if (V1IsUndef && V2IsUndef) 6449 return DAG.getUNDEF(VT); 6450 6451 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 6452 6453 // Vector shuffle lowering takes 3 steps: 6454 // 6455 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6456 // narrowing and commutation of operands should be handled. 6457 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6458 // shuffle nodes. 6459 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6460 // so the shuffle can be broken into other shuffles and the legalizer can 6461 // try the lowering again. 6462 // 6463 // The general idea is that no vector_shuffle operation should be left to 6464 // be matched during isel, all of them must be converted to a target specific 6465 // node here. 6466 6467 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6468 // narrowing and commutation of operands should be handled. The actual code 6469 // doesn't include all of those, work in progress... 6470 SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget); 6471 if (NewOp.getNode()) 6472 return NewOp; 6473 6474 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6475 // unpckh_undef). Only use pshufd if speed is more important than size. 
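  // (Size comparison, for reference: "unpcklps %xmm0, %xmm0" is a 3-byte
  // encoding, while "pshufd $imm, %xmm0, %xmm0" needs a 0x66 prefix plus an
  // immediate byte, so the unpck forms are preferred when optimizing for
  // size.)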
6476 if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp, HasAVX2)) 6477 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6478 if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp, HasAVX2)) 6479 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6480 6481 if (X86::isMOVDDUPMask(SVOp) && Subtarget->hasSSE3() && 6482 V2IsUndef && RelaxedMayFoldVectorLoad(V1)) 6483 return getMOVDDup(Op, dl, V1, DAG); 6484 6485 if (X86::isMOVHLPS_v_undef_Mask(SVOp)) 6486 return getMOVHighToLow(Op, dl, DAG); 6487 6488 // Use to match splats 6489 if (HasSSE2 && X86::isUNPCKHMask(SVOp, HasAVX2) && V2IsUndef && 6490 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6491 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6492 6493 if (X86::isPSHUFDMask(SVOp)) { 6494 // The actual implementation will match the mask in the if above and then 6495 // during isel it can match several different instructions, not only pshufd 6496 // as its name says, sad but true, emulate the behavior for now... 6497 if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6498 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6499 6500 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); 6501 6502 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6503 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6504 6505 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6506 TargetMask, DAG); 6507 } 6508 6509 // Check if this can be converted into a logical shift. 6510 bool isLeft = false; 6511 unsigned ShAmt = 0; 6512 SDValue ShVal; 6513 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 6514 if (isShift && ShVal.hasOneUse()) { 6515 // If the shifted value has multiple uses, it may be cheaper to use 6516 // v_set0 + movlhps or movhlps, etc. 6517 EVT EltVT = VT.getVectorElementType(); 6518 ShAmt *= EltVT.getSizeInBits(); 6519 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6520 } 6521 6522 if (X86::isMOVLMask(SVOp)) { 6523 if (ISD::isBuildVectorAllZeros(V1.getNode())) 6524 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 6525 if (!X86::isMOVLPMask(SVOp)) { 6526 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 6527 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6528 6529 if (VT == MVT::v4i32 || VT == MVT::v4f32) 6530 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6531 } 6532 } 6533 6534 // FIXME: fold these into legal mask. 6535 if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp, HasAVX2)) 6536 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 6537 6538 if (X86::isMOVHLPSMask(SVOp)) 6539 return getMOVHighToLow(Op, dl, DAG); 6540 6541 if (X86::isMOVSHDUPMask(SVOp, Subtarget)) 6542 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 6543 6544 if (X86::isMOVSLDUPMask(SVOp, Subtarget)) 6545 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 6546 6547 if (X86::isMOVLPMask(SVOp)) 6548 return getMOVLP(Op, dl, DAG, HasSSE2); 6549 6550 if (ShouldXformToMOVHLPS(SVOp) || 6551 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp)) 6552 return CommuteVectorShuffle(SVOp, DAG); 6553 6554 if (isShift) { 6555 // No better options. Use a vshldq / vsrldq. 6556 EVT EltVT = VT.getVectorElementType(); 6557 ShAmt *= EltVT.getSizeInBits(); 6558 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6559 } 6560 6561 bool Commuted = false; 6562 // FIXME: This should also accept a bitcast of a splat? Be careful, not 6563 // 1,1,1,1 -> v8i16 though. 
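  // (Commuting example, illustrative only: shuffle(splat, V2, <0, 4, 5, 6>)
  // becomes shuffle(V2, splat, <4, 0, 1, 2>), so the matchers below only ever
  // have to consider a splat in the second operand.)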
6564 V1IsSplat = isSplatVector(V1.getNode()); 6565 V2IsSplat = isSplatVector(V2.getNode()); 6566 6567 // Canonicalize the splat or undef, if present, to be on the RHS. 6568 if (V1IsSplat && !V2IsSplat) { 6569 Op = CommuteVectorShuffle(SVOp, DAG); 6570 SVOp = cast<ShuffleVectorSDNode>(Op); 6571 V1 = SVOp->getOperand(0); 6572 V2 = SVOp->getOperand(1); 6573 std::swap(V1IsSplat, V2IsSplat); 6574 Commuted = true; 6575 } 6576 6577 ArrayRef<int> M = SVOp->getMask(); 6578 6579 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 6580 // Shuffling low element of v1 into undef, just return v1. 6581 if (V2IsUndef) 6582 return V1; 6583 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 6584 // the instruction selector will not match, so get a canonical MOVL with 6585 // swapped operands to undo the commute. 6586 return getMOVL(DAG, dl, VT, V2, V1); 6587 } 6588 6589 if (isUNPCKLMask(M, VT, HasAVX2)) 6590 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6591 6592 if (isUNPCKHMask(M, VT, HasAVX2)) 6593 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6594 6595 if (V2IsSplat) { 6596 // Normalize mask so all entries that point to V2 points to its first 6597 // element then try to match unpck{h|l} again. If match, return a 6598 // new vector_shuffle with the corrected mask. 6599 SDValue NewMask = NormalizeMask(SVOp, DAG); 6600 ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask); 6601 if (NSVOp != SVOp) { 6602 if (X86::isUNPCKLMask(NSVOp, HasAVX2, true)) { 6603 return NewMask; 6604 } else if (X86::isUNPCKHMask(NSVOp, HasAVX2, true)) { 6605 return NewMask; 6606 } 6607 } 6608 } 6609 6610 if (Commuted) { 6611 // Commute is back and try unpck* again. 6612 // FIXME: this seems wrong. 6613 SDValue NewOp = CommuteVectorShuffle(SVOp, DAG); 6614 ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); 6615 6616 if (X86::isUNPCKLMask(NewSVOp, HasAVX2)) 6617 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V2, V1, DAG); 6618 6619 if (X86::isUNPCKHMask(NewSVOp, HasAVX2)) 6620 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V2, V1, DAG); 6621 } 6622 6623 // Normalize the node to match x86 shuffle ops if needed 6624 if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true))) 6625 return CommuteVectorShuffle(SVOp, DAG); 6626 6627 // The checks below are all present in isShuffleMaskLegal, but they are 6628 // inlined here right now to enable us to directly emit target specific 6629 // nodes, and remove one by one until they don't return Op anymore. 
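  // (PALIGNR background: "palignr $N" concatenates its two 128-bit sources
  // and extracts a contiguous, byte-aligned 128-bit window starting N bytes
  // in, so a mask selecting 16 consecutive bytes out of the 32-byte
  // concatenation can be matched with a single instruction.)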
6630 6631 if (isPALIGNRMask(M, VT, Subtarget)) 6632 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 6633 getShufflePALIGNRImmediate(SVOp), 6634 DAG); 6635 6636 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 6637 SVOp->getSplatIndex() == 0 && V2IsUndef) { 6638 if (VT == MVT::v2f64 || VT == MVT::v2i64) 6639 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6640 } 6641 6642 if (isPSHUFHWMask(M, VT)) 6643 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 6644 X86::getShufflePSHUFHWImmediate(SVOp), 6645 DAG); 6646 6647 if (isPSHUFLWMask(M, VT)) 6648 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 6649 X86::getShufflePSHUFLWImmediate(SVOp), 6650 DAG); 6651 6652 if (isSHUFPMask(M, VT, HasAVX)) 6653 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 6654 X86::getShuffleSHUFImmediate(SVOp), DAG); 6655 6656 if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6657 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6658 if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6659 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6660 6661 //===--------------------------------------------------------------------===// 6662 // Generate target specific nodes for 128 or 256-bit shuffles only 6663 // supported in the AVX instruction set. 6664 // 6665 6666 // Handle VMOVDDUPY permutations 6667 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX)) 6668 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 6669 6670 // Handle VPERMILPS/D* permutations 6671 if (isVPERMILPMask(M, VT, HasAVX)) 6672 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 6673 getShuffleVPERMILPImmediate(SVOp), DAG); 6674 6675 // Handle VPERM2F128/VPERM2I128 permutations 6676 if (isVPERM2X128Mask(M, VT, HasAVX)) 6677 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 6678 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 6679 6680 //===--------------------------------------------------------------------===// 6681 // Since no target specific shuffle was selected for this generic one, 6682 // lower it into other known shuffles. FIXME: this isn't true yet, but 6683 // this is the plan. 6684 // 6685 6686 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 6687 if (VT == MVT::v8i16) { 6688 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); 6689 if (NewOp.getNode()) 6690 return NewOp; 6691 } 6692 6693 if (VT == MVT::v16i8) { 6694 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 6695 if (NewOp.getNode()) 6696 return NewOp; 6697 } 6698 6699 // Handle all 128-bit wide vectors with 4 elements, and match them with 6700 // several different shuffle types. 
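  // For example (illustrative trace of the helper above): a v4f32 mask of
  // <2, 7, 1, 4> has two elements from each source, so it is emitted as two
  // shuffles:
  //   t = shuffle(V1, V2, <2, 1, 7, 4>)   // gather the four elements
  //   r = shuffle(t,  t,  <0, 2, 5, 7>)   // put them into final positions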
6701 if (NumElems == 4 && VT.getSizeInBits() == 128) 6702 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 6703 6704 // Handle general 256-bit shuffles 6705 if (VT.is256BitVector()) 6706 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 6707 6708 return SDValue(); 6709} 6710 6711SDValue 6712X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 6713 SelectionDAG &DAG) const { 6714 EVT VT = Op.getValueType(); 6715 DebugLoc dl = Op.getDebugLoc(); 6716 6717 if (Op.getOperand(0).getValueType().getSizeInBits() != 128) 6718 return SDValue(); 6719 6720 if (VT.getSizeInBits() == 8) { 6721 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 6722 Op.getOperand(0), Op.getOperand(1)); 6723 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 6724 DAG.getValueType(VT)); 6725 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6726 } else if (VT.getSizeInBits() == 16) { 6727 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6728 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 6729 if (Idx == 0) 6730 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6731 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6732 DAG.getNode(ISD::BITCAST, dl, 6733 MVT::v4i32, 6734 Op.getOperand(0)), 6735 Op.getOperand(1))); 6736 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 6737 Op.getOperand(0), Op.getOperand(1)); 6738 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 6739 DAG.getValueType(VT)); 6740 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6741 } else if (VT == MVT::f32) { 6742 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 6743 // the result back to FR32 register. It's only worth matching if the 6744 // result has a single use which is a store or a bitcast to i32. And in 6745 // the case of a store, it's not worth it if the index is a constant 0, 6746 // because a MOVSSmr can be used instead, which is smaller and faster. 6747 if (!Op.hasOneUse()) 6748 return SDValue(); 6749 SDNode *User = *Op.getNode()->use_begin(); 6750 if ((User->getOpcode() != ISD::STORE || 6751 (isa<ConstantSDNode>(Op.getOperand(1)) && 6752 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 6753 (User->getOpcode() != ISD::BITCAST || 6754 User->getValueType(0) != MVT::i32)) 6755 return SDValue(); 6756 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6757 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 6758 Op.getOperand(0)), 6759 Op.getOperand(1)); 6760 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 6761 } else if (VT == MVT::i32 || VT == MVT::i64) { 6762 // ExtractPS/pextrq works with constant index. 6763 if (isa<ConstantSDNode>(Op.getOperand(1))) 6764 return Op; 6765 } 6766 return SDValue(); 6767} 6768 6769 6770SDValue 6771X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 6772 SelectionDAG &DAG) const { 6773 if (!isa<ConstantSDNode>(Op.getOperand(1))) 6774 return SDValue(); 6775 6776 SDValue Vec = Op.getOperand(0); 6777 EVT VecVT = Vec.getValueType(); 6778 6779 // If this is a 256-bit vector result, first extract the 128-bit vector and 6780 // then extract the element from the 128-bit vector. 6781 if (VecVT.getSizeInBits() == 256) { 6782 DebugLoc dl = Op.getNode()->getDebugLoc(); 6783 unsigned NumElems = VecVT.getVectorNumElements(); 6784 SDValue Idx = Op.getOperand(1); 6785 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 6786 6787 // Get the 128-bit vector. 6788 bool Upper = IdxVal >= NumElems/2; 6789 Vec = Extract128BitVector(Vec, 6790 DAG.getConstant(Upper ? 
NumElems/2 : 0, MVT::i32), DAG, dl); 6791 6792 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 6793 Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx); 6794 } 6795 6796 assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length"); 6797 6798 if (Subtarget->hasSSE41()) { 6799 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 6800 if (Res.getNode()) 6801 return Res; 6802 } 6803 6804 EVT VT = Op.getValueType(); 6805 DebugLoc dl = Op.getDebugLoc(); 6806 // TODO: handle v16i8. 6807 if (VT.getSizeInBits() == 16) { 6808 SDValue Vec = Op.getOperand(0); 6809 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6810 if (Idx == 0) 6811 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6812 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6813 DAG.getNode(ISD::BITCAST, dl, 6814 MVT::v4i32, Vec), 6815 Op.getOperand(1))); 6816 // Transform it so it match pextrw which produces a 32-bit result. 6817 EVT EltVT = MVT::i32; 6818 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 6819 Op.getOperand(0), Op.getOperand(1)); 6820 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 6821 DAG.getValueType(VT)); 6822 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6823 } else if (VT.getSizeInBits() == 32) { 6824 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6825 if (Idx == 0) 6826 return Op; 6827 6828 // SHUFPS the element to the lowest double word, then movss. 6829 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 6830 EVT VVT = Op.getOperand(0).getValueType(); 6831 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6832 DAG.getUNDEF(VVT), Mask); 6833 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6834 DAG.getIntPtrConstant(0)); 6835 } else if (VT.getSizeInBits() == 64) { 6836 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 6837 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 6838 // to match extract_elt for f64. 6839 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6840 if (Idx == 0) 6841 return Op; 6842 6843 // UNPCKHPD the element to the lowest double word, then movsd. 6844 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 6845 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 6846 int Mask[2] = { 1, -1 }; 6847 EVT VVT = Op.getOperand(0).getValueType(); 6848 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6849 DAG.getUNDEF(VVT), Mask); 6850 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6851 DAG.getIntPtrConstant(0)); 6852 } 6853 6854 return SDValue(); 6855} 6856 6857SDValue 6858X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 6859 SelectionDAG &DAG) const { 6860 EVT VT = Op.getValueType(); 6861 EVT EltVT = VT.getVectorElementType(); 6862 DebugLoc dl = Op.getDebugLoc(); 6863 6864 SDValue N0 = Op.getOperand(0); 6865 SDValue N1 = Op.getOperand(1); 6866 SDValue N2 = Op.getOperand(2); 6867 6868 if (VT.getSizeInBits() == 256) 6869 return SDValue(); 6870 6871 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 6872 isa<ConstantSDNode>(N2)) { 6873 unsigned Opc; 6874 if (VT == MVT::v8i16) 6875 Opc = X86ISD::PINSRW; 6876 else if (VT == MVT::v16i8) 6877 Opc = X86ISD::PINSRB; 6878 else 6879 Opc = X86ISD::PINSRB; 6880 6881 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 6882 // argument. 
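  // (For example: inserting an i8 into a v16i8 at index 3 becomes
  // (PINSRB vec, (any_extend i8 to i32), 3), i.e. "pinsrb $3, %eax, %xmm0",
  // where only the low byte of %eax is actually used.)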
6883 if (N1.getValueType() != MVT::i32) 6884 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6885 if (N2.getValueType() != MVT::i32) 6886 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6887 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 6888 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 6889 // Bits [7:6] of the constant are the source select. This will always be 6890 // zero here. The DAG Combiner may combine an extract_elt index into these 6891 // bits. For example (insert (extract, 3), 2) could be matched by putting 6892 // the '3' into bits [7:6] of X86ISD::INSERTPS. 6893 // Bits [5:4] of the constant are the destination select. This is the 6894 // value of the incoming immediate. 6895 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 6896 // combine either bitwise AND or insert of float 0.0 to set these bits. 6897 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 6898 // Create this as a scalar to vector.. 6899 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 6900 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 6901 } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) && 6902 isa<ConstantSDNode>(N2)) { 6903 // PINSR* works with constant index. 6904 return Op; 6905 } 6906 return SDValue(); 6907} 6908 6909SDValue 6910X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 6911 EVT VT = Op.getValueType(); 6912 EVT EltVT = VT.getVectorElementType(); 6913 6914 DebugLoc dl = Op.getDebugLoc(); 6915 SDValue N0 = Op.getOperand(0); 6916 SDValue N1 = Op.getOperand(1); 6917 SDValue N2 = Op.getOperand(2); 6918 6919 // If this is a 256-bit vector result, first extract the 128-bit vector, 6920 // insert the element into the extracted half and then place it back. 6921 if (VT.getSizeInBits() == 256) { 6922 if (!isa<ConstantSDNode>(N2)) 6923 return SDValue(); 6924 6925 // Get the desired 128-bit vector half. 6926 unsigned NumElems = VT.getVectorNumElements(); 6927 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 6928 bool Upper = IdxVal >= NumElems/2; 6929 SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32); 6930 SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl); 6931 6932 // Insert the element into the desired half. 6933 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, 6934 N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2); 6935 6936 // Insert the changed part back to the 256-bit vector 6937 return Insert128BitVector(N0, V, Ins128Idx, DAG, dl); 6938 } 6939 6940 if (Subtarget->hasSSE41()) 6941 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 6942 6943 if (EltVT == MVT::i8) 6944 return SDValue(); 6945 6946 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 6947 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 6948 // as its second argument. 
6949 if (N1.getValueType() != MVT::i32) 6950 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6951 if (N2.getValueType() != MVT::i32) 6952 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6953 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 6954 } 6955 return SDValue(); 6956} 6957 6958SDValue 6959X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { 6960 LLVMContext *Context = DAG.getContext(); 6961 DebugLoc dl = Op.getDebugLoc(); 6962 EVT OpVT = Op.getValueType(); 6963 6964 // If this is a 256-bit vector result, first insert into a 128-bit 6965 // vector and then insert into the 256-bit vector. 6966 if (OpVT.getSizeInBits() > 128) { 6967 // Insert into a 128-bit vector. 6968 EVT VT128 = EVT::getVectorVT(*Context, 6969 OpVT.getVectorElementType(), 6970 OpVT.getVectorNumElements() / 2); 6971 6972 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 6973 6974 // Insert the 128-bit vector. 6975 return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op, 6976 DAG.getConstant(0, MVT::i32), 6977 DAG, dl); 6978 } 6979 6980 if (Op.getValueType() == MVT::v1i64 && 6981 Op.getOperand(0).getValueType() == MVT::i64) 6982 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 6983 6984 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 6985 assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 && 6986 "Expected an SSE type!"); 6987 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), 6988 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 6989} 6990 6991// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 6992// a simple subregister reference or explicit instructions to grab 6993// upper bits of a vector. 6994SDValue 6995X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 6996 if (Subtarget->hasAVX()) { 6997 DebugLoc dl = Op.getNode()->getDebugLoc(); 6998 SDValue Vec = Op.getNode()->getOperand(0); 6999 SDValue Idx = Op.getNode()->getOperand(1); 7000 7001 if (Op.getNode()->getValueType(0).getSizeInBits() == 128 7002 && Vec.getNode()->getValueType(0).getSizeInBits() == 256) { 7003 return Extract128BitVector(Vec, Idx, DAG, dl); 7004 } 7005 } 7006 return SDValue(); 7007} 7008 7009// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7010// simple superregister reference or explicit instructions to insert 7011// the upper bits of a vector. 7012SDValue 7013X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 7014 if (Subtarget->hasAVX()) { 7015 DebugLoc dl = Op.getNode()->getDebugLoc(); 7016 SDValue Vec = Op.getNode()->getOperand(0); 7017 SDValue SubVec = Op.getNode()->getOperand(1); 7018 SDValue Idx = Op.getNode()->getOperand(2); 7019 7020 if (Op.getNode()->getValueType(0).getSizeInBits() == 256 7021 && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) { 7022 return Insert128BitVector(Vec, SubVec, Idx, DAG, dl); 7023 } 7024 } 7025 return SDValue(); 7026} 7027 7028// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 7029// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 7030// one of the above mentioned nodes. It has to be wrapped because otherwise 7031// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 7032// be used to form addressing mode. These wrapped nodes will be selected 7033// into MOV32ri. 
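//
// For illustration only (hypothetical labels): with static relocation and the
// small code model the wrapped constant-pool reference selects to something
// like "movl $.LCPI0_0, %eax"; in 32-bit GOT-style PIC it becomes the PIC
// base plus an @GOTOFF offset, e.g. "leal .LCPI0_0@GOTOFF(%ebx), %eax"; and
// in RIP-relative code it is folded into an address like ".LCPI0_0(%rip)".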
7034SDValue 7035X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 7036 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 7037 7038 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7039 // global base reg. 7040 unsigned char OpFlag = 0; 7041 unsigned WrapperKind = X86ISD::Wrapper; 7042 CodeModel::Model M = getTargetMachine().getCodeModel(); 7043 7044 if (Subtarget->isPICStyleRIPRel() && 7045 (M == CodeModel::Small || M == CodeModel::Kernel)) 7046 WrapperKind = X86ISD::WrapperRIP; 7047 else if (Subtarget->isPICStyleGOT()) 7048 OpFlag = X86II::MO_GOTOFF; 7049 else if (Subtarget->isPICStyleStubPIC()) 7050 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7051 7052 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), 7053 CP->getAlignment(), 7054 CP->getOffset(), OpFlag); 7055 DebugLoc DL = CP->getDebugLoc(); 7056 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7057 // With PIC, the address is actually $g + Offset. 7058 if (OpFlag) { 7059 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7060 DAG.getNode(X86ISD::GlobalBaseReg, 7061 DebugLoc(), getPointerTy()), 7062 Result); 7063 } 7064 7065 return Result; 7066} 7067 7068SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 7069 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 7070 7071 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7072 // global base reg. 7073 unsigned char OpFlag = 0; 7074 unsigned WrapperKind = X86ISD::Wrapper; 7075 CodeModel::Model M = getTargetMachine().getCodeModel(); 7076 7077 if (Subtarget->isPICStyleRIPRel() && 7078 (M == CodeModel::Small || M == CodeModel::Kernel)) 7079 WrapperKind = X86ISD::WrapperRIP; 7080 else if (Subtarget->isPICStyleGOT()) 7081 OpFlag = X86II::MO_GOTOFF; 7082 else if (Subtarget->isPICStyleStubPIC()) 7083 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7084 7085 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 7086 OpFlag); 7087 DebugLoc DL = JT->getDebugLoc(); 7088 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7089 7090 // With PIC, the address is actually $g + Offset. 7091 if (OpFlag) 7092 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7093 DAG.getNode(X86ISD::GlobalBaseReg, 7094 DebugLoc(), getPointerTy()), 7095 Result); 7096 7097 return Result; 7098} 7099 7100SDValue 7101X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7102 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7103 7104 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7105 // global base reg. 7106 unsigned char OpFlag = 0; 7107 unsigned WrapperKind = X86ISD::Wrapper; 7108 CodeModel::Model M = getTargetMachine().getCodeModel(); 7109 7110 if (Subtarget->isPICStyleRIPRel() && 7111 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7112 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7113 OpFlag = X86II::MO_GOTPCREL; 7114 WrapperKind = X86ISD::WrapperRIP; 7115 } else if (Subtarget->isPICStyleGOT()) { 7116 OpFlag = X86II::MO_GOT; 7117 } else if (Subtarget->isPICStyleStubPIC()) { 7118 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7119 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7120 OpFlag = X86II::MO_DARWIN_NONLAZY; 7121 } 7122 7123 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7124 7125 DebugLoc DL = Op.getDebugLoc(); 7126 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7127 7128 7129 // With PIC, the address is actually $g + Offset. 
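  // ($g is the PIC base, i.e. X86ISD::GlobalBaseReg. Illustrative example:
  // an external symbol referenced through the GOT on 32-bit ELF ends up as
  // (load (add GlobalBaseReg, sym@GOT)), which instruction selection folds
  // into a single "movl sym@GOT(%ebx), %eax".)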
7130 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7131 !Subtarget->is64Bit()) { 7132 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7133 DAG.getNode(X86ISD::GlobalBaseReg, 7134 DebugLoc(), getPointerTy()), 7135 Result); 7136 } 7137 7138 // For symbols that require a load from a stub to get the address, emit the 7139 // load. 7140 if (isGlobalStubReference(OpFlag)) 7141 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7142 MachinePointerInfo::getGOT(), false, false, false, 0); 7143 7144 return Result; 7145} 7146 7147SDValue 7148X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7149 // Create the TargetBlockAddressAddress node. 7150 unsigned char OpFlags = 7151 Subtarget->ClassifyBlockAddressReference(); 7152 CodeModel::Model M = getTargetMachine().getCodeModel(); 7153 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 7154 DebugLoc dl = Op.getDebugLoc(); 7155 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), 7156 /*isTarget=*/true, OpFlags); 7157 7158 if (Subtarget->isPICStyleRIPRel() && 7159 (M == CodeModel::Small || M == CodeModel::Kernel)) 7160 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7161 else 7162 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7163 7164 // With PIC, the address is actually $g + Offset. 7165 if (isGlobalRelativeToPICBase(OpFlags)) { 7166 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7167 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7168 Result); 7169 } 7170 7171 return Result; 7172} 7173 7174SDValue 7175X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 7176 int64_t Offset, 7177 SelectionDAG &DAG) const { 7178 // Create the TargetGlobalAddress node, folding in the constant 7179 // offset if it is legal. 7180 unsigned char OpFlags = 7181 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 7182 CodeModel::Model M = getTargetMachine().getCodeModel(); 7183 SDValue Result; 7184 if (OpFlags == X86II::MO_NO_FLAG && 7185 X86::isOffsetSuitableForCodeModel(Offset, M)) { 7186 // A direct static reference to a global. 7187 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 7188 Offset = 0; 7189 } else { 7190 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 7191 } 7192 7193 if (Subtarget->isPICStyleRIPRel() && 7194 (M == CodeModel::Small || M == CodeModel::Kernel)) 7195 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7196 else 7197 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7198 7199 // With PIC, the address is actually $g + Offset. 7200 if (isGlobalRelativeToPICBase(OpFlags)) { 7201 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7202 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7203 Result); 7204 } 7205 7206 // For globals that require a load from a stub to get the address, emit the 7207 // load. 7208 if (isGlobalStubReference(OpFlags)) 7209 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 7210 MachinePointerInfo::getGOT(), false, false, false, 0); 7211 7212 // If there was a non-zero offset that we didn't fold, create an explicit 7213 // addition for it. 
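  // (E.g. for "&gv + 8" where gv needs a GOT or stub indirection: the +8
  // cannot be folded into the indirect reference above, so it is materialized
  // here as a separate add on the loaded address.)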
7214 if (Offset != 0) 7215 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 7216 DAG.getConstant(Offset, getPointerTy())); 7217 7218 return Result; 7219} 7220 7221SDValue 7222X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 7223 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 7224 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 7225 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 7226} 7227 7228static SDValue 7229GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 7230 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 7231 unsigned char OperandFlags) { 7232 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7233 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7234 DebugLoc dl = GA->getDebugLoc(); 7235 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7236 GA->getValueType(0), 7237 GA->getOffset(), 7238 OperandFlags); 7239 if (InFlag) { 7240 SDValue Ops[] = { Chain, TGA, *InFlag }; 7241 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3); 7242 } else { 7243 SDValue Ops[] = { Chain, TGA }; 7244 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2); 7245 } 7246 7247 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 7248 MFI->setAdjustsStack(true); 7249 7250 SDValue Flag = Chain.getValue(1); 7251 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 7252} 7253 7254// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 7255static SDValue 7256LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7257 const EVT PtrVT) { 7258 SDValue InFlag; 7259 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better 7260 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7261 DAG.getNode(X86ISD::GlobalBaseReg, 7262 DebugLoc(), PtrVT), InFlag); 7263 InFlag = Chain.getValue(1); 7264 7265 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 7266} 7267 7268// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 7269static SDValue 7270LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7271 const EVT PtrVT) { 7272 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 7273 X86::RAX, X86II::MO_TLSGD); 7274} 7275 7276// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 7277// "local exec" model. 7278static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7279 const EVT PtrVT, TLSModel::Model model, 7280 bool is64Bit) { 7281 DebugLoc dl = GA->getDebugLoc(); 7282 7283 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 7284 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 7285 is64Bit ? 257 : 256)); 7286 7287 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 7288 DAG.getIntPtrConstant(0), 7289 MachinePointerInfo(Ptr), 7290 false, false, false, 0); 7291 7292 unsigned char OperandFlags = 0; 7293 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 7294 // initialexec. 7295 unsigned WrapperKind = X86ISD::Wrapper; 7296 if (model == TLSModel::LocalExec) { 7297 OperandFlags = is64Bit ? 
X86II::MO_TPOFF : X86II::MO_NTPOFF; 7298 } else if (is64Bit) { 7299 assert(model == TLSModel::InitialExec); 7300 OperandFlags = X86II::MO_GOTTPOFF; 7301 WrapperKind = X86ISD::WrapperRIP; 7302 } else { 7303 assert(model == TLSModel::InitialExec); 7304 OperandFlags = X86II::MO_INDNTPOFF; 7305 } 7306 7307 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 7308 // exec) 7309 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7310 GA->getValueType(0), 7311 GA->getOffset(), OperandFlags); 7312 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7313 7314 if (model == TLSModel::InitialExec) 7315 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 7316 MachinePointerInfo::getGOT(), false, false, false, 0); 7317 7318 // The address of the thread local variable is the add of the thread 7319 // pointer with the offset of the variable. 7320 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 7321} 7322 7323SDValue 7324X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 7325 7326 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 7327 const GlobalValue *GV = GA->getGlobal(); 7328 7329 if (Subtarget->isTargetELF()) { 7330 // TODO: implement the "local dynamic" model 7331 // TODO: implement the "initial exec"model for pic executables 7332 7333 // If GV is an alias then use the aliasee for determining 7334 // thread-localness. 7335 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 7336 GV = GA->resolveAliasedGlobal(false); 7337 7338 TLSModel::Model model 7339 = getTLSModel(GV, getTargetMachine().getRelocationModel()); 7340 7341 switch (model) { 7342 case TLSModel::GeneralDynamic: 7343 case TLSModel::LocalDynamic: // not implemented 7344 if (Subtarget->is64Bit()) 7345 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 7346 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 7347 7348 case TLSModel::InitialExec: 7349 case TLSModel::LocalExec: 7350 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 7351 Subtarget->is64Bit()); 7352 } 7353 } else if (Subtarget->isTargetDarwin()) { 7354 // Darwin only has one model of TLS. Lower to that. 7355 unsigned char OpFlag = 0; 7356 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 7357 X86ISD::WrapperRIP : X86ISD::Wrapper; 7358 7359 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7360 // global base reg. 7361 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 7362 !Subtarget->is64Bit(); 7363 if (PIC32) 7364 OpFlag = X86II::MO_TLVP_PIC_BASE; 7365 else 7366 OpFlag = X86II::MO_TLVP; 7367 DebugLoc DL = Op.getDebugLoc(); 7368 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 7369 GA->getValueType(0), 7370 GA->getOffset(), OpFlag); 7371 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7372 7373 // With PIC32, the address is actually $g + Offset. 7374 if (PIC32) 7375 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7376 DAG.getNode(X86ISD::GlobalBaseReg, 7377 DebugLoc(), getPointerTy()), 7378 Offset); 7379 7380 // Lowering the machine isd will make sure everything is in the right 7381 // location. 7382 SDValue Chain = DAG.getEntryNode(); 7383 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7384 SDValue Args[] = { Chain, Offset }; 7385 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 7386 7387 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
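  // (On x86-64 Darwin the TLSCALL sequence is roughly:
  //    movq  _var@TLVP(%rip), %rdi
  //    callq *(%rdi)
  //  with the address of the variable returned in %rax, as consumed below.)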
7388 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7389 MFI->setAdjustsStack(true); 7390 7391 // And our return value (tls address) is in the standard call return value 7392 // location. 7393 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 7394 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), 7395 Chain.getValue(1)); 7396 } 7397 7398 llvm_unreachable("TLS not implemented for this target."); 7399} 7400 7401 7402/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values 7403/// and take a 2 x i32 value to shift plus a shift amount. 7404SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7405 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7406 EVT VT = Op.getValueType(); 7407 unsigned VTBits = VT.getSizeInBits(); 7408 DebugLoc dl = Op.getDebugLoc(); 7409 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7410 SDValue ShOpLo = Op.getOperand(0); 7411 SDValue ShOpHi = Op.getOperand(1); 7412 SDValue ShAmt = Op.getOperand(2); 7413 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7414 DAG.getConstant(VTBits - 1, MVT::i8)) 7415 : DAG.getConstant(0, VT); 7416 7417 SDValue Tmp2, Tmp3; 7418 if (Op.getOpcode() == ISD::SHL_PARTS) { 7419 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7420 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7421 } else { 7422 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7423 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7424 } 7425 7426 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7427 DAG.getConstant(VTBits, MVT::i8)); 7428 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7429 AndNode, DAG.getConstant(0, MVT::i8)); 7430 7431 SDValue Hi, Lo; 7432 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7433 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7434 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7435 7436 if (Op.getOpcode() == ISD::SHL_PARTS) { 7437 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7438 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7439 } else { 7440 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7441 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7442 } 7443 7444 SDValue Ops[2] = { Lo, Hi }; 7445 return DAG.getMergeValues(Ops, 2, dl); 7446} 7447 7448SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7449 SelectionDAG &DAG) const { 7450 EVT SrcVT = Op.getOperand(0).getValueType(); 7451 7452 if (SrcVT.isVector()) 7453 return SDValue(); 7454 7455 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7456 "Unknown SINT_TO_FP to lower!"); 7457 7458 // These are really Legal; return the operand so the caller accepts it as 7459 // Legal. 
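  // (E.g. i32->f64 with SSE2 selects directly to cvtsi2sd, and i64->f64 on
  // x86-64 to cvtsi2sdq; only the cases that fall through build the x87 FILD
  // sequence below.)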
7460 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7461 return Op; 7462 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7463 Subtarget->is64Bit()) { 7464 return Op; 7465 } 7466 7467 DebugLoc dl = Op.getDebugLoc(); 7468 unsigned Size = SrcVT.getSizeInBits()/8; 7469 MachineFunction &MF = DAG.getMachineFunction(); 7470 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7471 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7472 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7473 StackSlot, 7474 MachinePointerInfo::getFixedStack(SSFI), 7475 false, false, 0); 7476 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7477} 7478 7479SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7480 SDValue StackSlot, 7481 SelectionDAG &DAG) const { 7482 // Build the FILD 7483 DebugLoc DL = Op.getDebugLoc(); 7484 SDVTList Tys; 7485 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 7486 if (useSSE) 7487 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 7488 else 7489 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 7490 7491 unsigned ByteSize = SrcVT.getSizeInBits()/8; 7492 7493 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 7494 MachineMemOperand *MMO; 7495 if (FI) { 7496 int SSFI = FI->getIndex(); 7497 MMO = 7498 DAG.getMachineFunction() 7499 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7500 MachineMemOperand::MOLoad, ByteSize, ByteSize); 7501 } else { 7502 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 7503 StackSlot = StackSlot.getOperand(1); 7504 } 7505 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 7506 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 7507 X86ISD::FILD, DL, 7508 Tys, Ops, array_lengthof(Ops), 7509 SrcVT, MMO); 7510 7511 if (useSSE) { 7512 Chain = Result.getValue(1); 7513 SDValue InFlag = Result.getValue(2); 7514 7515 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 7516 // shouldn't be necessary except that RFP cannot be live across 7517 // multiple blocks. When stackifier is fixed, they can be uncoupled. 7518 MachineFunction &MF = DAG.getMachineFunction(); 7519 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 7520 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 7521 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7522 Tys = DAG.getVTList(MVT::Other); 7523 SDValue Ops[] = { 7524 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 7525 }; 7526 MachineMemOperand *MMO = 7527 DAG.getMachineFunction() 7528 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7529 MachineMemOperand::MOStore, SSFISize, SSFISize); 7530 7531 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 7532 Ops, array_lengthof(Ops), 7533 Op.getValueType(), MMO); 7534 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 7535 MachinePointerInfo::getFixedStack(SSFI), 7536 false, false, false, 0); 7537 } 7538 7539 return Result; 7540} 7541 7542// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 7543SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 7544 SelectionDAG &DAG) const { 7545 // This algorithm is not obvious. 
Here it is what we're trying to output: 7546 /* 7547 movq %rax, %xmm0 7548 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 7549 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 7550 #ifdef __SSE3__ 7551 haddpd %xmm0, %xmm0 7552 #else 7553 pshufd $0x4e, %xmm0, %xmm1 7554 addpd %xmm1, %xmm0 7555 #endif 7556 */ 7557 7558 DebugLoc dl = Op.getDebugLoc(); 7559 LLVMContext *Context = DAG.getContext(); 7560 7561 // Build some magic constants. 7562 SmallVector<Constant*,4> CV0; 7563 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000))); 7564 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000))); 7565 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 7566 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 7567 Constant *C0 = ConstantVector::get(CV0); 7568 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 7569 7570 SmallVector<Constant*,2> CV1; 7571 CV1.push_back( 7572 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 7573 CV1.push_back( 7574 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 7575 Constant *C1 = ConstantVector::get(CV1); 7576 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 7577 7578 // Load the 64-bit value into an XMM register. 7579 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 7580 Op.getOperand(0)); 7581 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 7582 MachinePointerInfo::getConstantPool(), 7583 false, false, false, 16); 7584 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 7585 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 7586 CLod0); 7587 7588 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 7589 MachinePointerInfo::getConstantPool(), 7590 false, false, false, 16); 7591 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 7592 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 7593 SDValue Result; 7594 7595 if (Subtarget->hasSSE3()) { 7596 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 7597 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 7598 } else { 7599 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 7600 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 7601 S2F, 0x4E, DAG); 7602 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 7603 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 7604 Sub); 7605 } 7606 7607 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 7608 DAG.getIntPtrConstant(0)); 7609} 7610 7611// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 7612SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 7613 SelectionDAG &DAG) const { 7614 DebugLoc dl = Op.getDebugLoc(); 7615 // FP constant to bias correct the final result. 7616 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 7617 MVT::f64); 7618 7619 // Load the 32-bit value into an XMM register. 7620 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 7621 Op.getOperand(0)); 7622 7623 // Zero out the upper parts of the register. 7624 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); 7625 7626 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7627 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 7628 DAG.getIntPtrConstant(0)); 7629 7630 // Or the load with the bias. 
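  // (Why this works, for reference: 0x4330000000000000 is 2^52 as a double
  // with an all-zero mantissa, so OR-ing the zero-extended 32-bit value into
  // the low mantissa bits yields exactly the double 2^52 + x; subtracting the
  // 2^52 bias below then leaves x, converted exactly.)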
7631 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 7632 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 7633 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 7634 MVT::v2f64, Load)), 7635 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 7636 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 7637 MVT::v2f64, Bias))); 7638 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7639 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 7640 DAG.getIntPtrConstant(0)); 7641 7642 // Subtract the bias. 7643 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 7644 7645 // Handle final rounding. 7646 EVT DestVT = Op.getValueType(); 7647 7648 if (DestVT.bitsLT(MVT::f64)) { 7649 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 7650 DAG.getIntPtrConstant(0)); 7651 } else if (DestVT.bitsGT(MVT::f64)) { 7652 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 7653 } 7654 7655 // Handle final rounding. 7656 return Sub; 7657} 7658 7659SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 7660 SelectionDAG &DAG) const { 7661 SDValue N0 = Op.getOperand(0); 7662 DebugLoc dl = Op.getDebugLoc(); 7663 7664 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 7665 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 7666 // the optimization here. 7667 if (DAG.SignBitIsZero(N0)) 7668 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 7669 7670 EVT SrcVT = N0.getValueType(); 7671 EVT DstVT = Op.getValueType(); 7672 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 7673 return LowerUINT_TO_FP_i64(Op, DAG); 7674 else if (SrcVT == MVT::i32 && X86ScalarSSEf64) 7675 return LowerUINT_TO_FP_i32(Op, DAG); 7676 else if (Subtarget->is64Bit() && 7677 SrcVT == MVT::i64 && DstVT == MVT::f32) 7678 return SDValue(); 7679 7680 // Make a 64-bit buffer, and use it to build an FILD. 7681 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 7682 if (SrcVT == MVT::i32) { 7683 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 7684 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 7685 getPointerTy(), StackSlot, WordOff); 7686 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7687 StackSlot, MachinePointerInfo(), 7688 false, false, 0); 7689 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 7690 OffsetSlot, MachinePointerInfo(), 7691 false, false, 0); 7692 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 7693 return Fild; 7694 } 7695 7696 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 7697 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7698 StackSlot, MachinePointerInfo(), 7699 false, false, 0); 7700 // For i64 source, we need to add the appropriate power of 2 if the input 7701 // was negative. This is the same as the optimization in 7702 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 7703 // we must be careful to do the computation in x87 extended precision, not 7704 // in SSE. (The generic code can't know it's OK to do this, or how to.) 
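  // Concretely: the FILD built below reinterprets the stored 64-bit pattern as
  // a signed integer, so an input with the high bit set is read as (x - 2^64).
  // The fudge-factor code that follows adds 2^64 back in exactly that case;
  // 0x5F800000 is the IEEE-754 single-precision bit pattern of 2^64
  // (exponent field 127 + 64, zero mantissa).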
7705 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 7706 MachineMemOperand *MMO = 7707 DAG.getMachineFunction() 7708 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7709 MachineMemOperand::MOLoad, 8, 8); 7710 7711 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 7712 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 7713 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 7714 MVT::i64, MMO); 7715 7716 APInt FF(32, 0x5F800000ULL); 7717 7718 // Check whether the sign bit is set. 7719 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 7720 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 7721 ISD::SETLT); 7722 7723 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 7724 SDValue FudgePtr = DAG.getConstantPool( 7725 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 7726 getPointerTy()); 7727 7728 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 7729 SDValue Zero = DAG.getIntPtrConstant(0); 7730 SDValue Four = DAG.getIntPtrConstant(4); 7731 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 7732 Zero, Four); 7733 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 7734 7735 // Load the value out, extending it from f32 to f80. 7736 // FIXME: Avoid the extend by constructing the right constant pool? 7737 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 7738 FudgePtr, MachinePointerInfo::getConstantPool(), 7739 MVT::f32, false, false, 4); 7740 // Extend everything to 80 bits to force it to be done on x87. 7741 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 7742 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 7743} 7744 7745std::pair<SDValue,SDValue> X86TargetLowering:: 7746FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const { 7747 DebugLoc DL = Op.getDebugLoc(); 7748 7749 EVT DstTy = Op.getValueType(); 7750 7751 if (!IsSigned) { 7752 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 7753 DstTy = MVT::i64; 7754 } 7755 7756 assert(DstTy.getSimpleVT() <= MVT::i64 && 7757 DstTy.getSimpleVT() >= MVT::i16 && 7758 "Unknown FP_TO_SINT to lower!"); 7759 7760 // These are really Legal. 7761 if (DstTy == MVT::i32 && 7762 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7763 return std::make_pair(SDValue(), SDValue()); 7764 if (Subtarget->is64Bit() && 7765 DstTy == MVT::i64 && 7766 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7767 return std::make_pair(SDValue(), SDValue()); 7768 7769 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 7770 // stack slot. 
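  // The FP_TO_INT*_IN_MEM node built below is expanded after isel into,
  // roughly: save the x87 control word, force the rounding mode to
  // round-toward-zero (FP-to-int conversion truncates), fistp into the stack
  // slot, then restore the original control word.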
7771 MachineFunction &MF = DAG.getMachineFunction(); 7772 unsigned MemSize = DstTy.getSizeInBits()/8; 7773 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7774 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7775 7776 7777 7778 unsigned Opc; 7779 switch (DstTy.getSimpleVT().SimpleTy) { 7780 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 7781 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 7782 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 7783 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 7784 } 7785 7786 SDValue Chain = DAG.getEntryNode(); 7787 SDValue Value = Op.getOperand(0); 7788 EVT TheVT = Op.getOperand(0).getValueType(); 7789 if (isScalarFPTypeInSSEReg(TheVT)) { 7790 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 7791 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 7792 MachinePointerInfo::getFixedStack(SSFI), 7793 false, false, 0); 7794 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 7795 SDValue Ops[] = { 7796 Chain, StackSlot, DAG.getValueType(TheVT) 7797 }; 7798 7799 MachineMemOperand *MMO = 7800 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7801 MachineMemOperand::MOLoad, MemSize, MemSize); 7802 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 7803 DstTy, MMO); 7804 Chain = Value.getValue(1); 7805 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7806 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7807 } 7808 7809 MachineMemOperand *MMO = 7810 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7811 MachineMemOperand::MOStore, MemSize, MemSize); 7812 7813 // Build the FP_TO_INT*_IN_MEM 7814 SDValue Ops[] = { Chain, Value, StackSlot }; 7815 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 7816 Ops, 3, DstTy, MMO); 7817 7818 return std::make_pair(FIST, StackSlot); 7819} 7820 7821SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 7822 SelectionDAG &DAG) const { 7823 if (Op.getValueType().isVector()) 7824 return SDValue(); 7825 7826 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); 7827 SDValue FIST = Vals.first, StackSlot = Vals.second; 7828 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 7829 if (FIST.getNode() == 0) return Op; 7830 7831 // Load the result. 7832 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7833 FIST, StackSlot, MachinePointerInfo(), 7834 false, false, false, 0); 7835} 7836 7837SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 7838 SelectionDAG &DAG) const { 7839 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false); 7840 SDValue FIST = Vals.first, StackSlot = Vals.second; 7841 assert(FIST.getNode() && "Unexpected failure"); 7842 7843 // Load the result. 
7844 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7845 FIST, StackSlot, MachinePointerInfo(), 7846 false, false, false, 0); 7847} 7848 7849SDValue X86TargetLowering::LowerFABS(SDValue Op, 7850 SelectionDAG &DAG) const { 7851 LLVMContext *Context = DAG.getContext(); 7852 DebugLoc dl = Op.getDebugLoc(); 7853 EVT VT = Op.getValueType(); 7854 EVT EltVT = VT; 7855 if (VT.isVector()) 7856 EltVT = VT.getVectorElementType(); 7857 Constant *C; 7858 if (EltVT == MVT::f64) { 7859 C = ConstantVector::getSplat(2, 7860 ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 7861 } else { 7862 C = ConstantVector::getSplat(4, 7863 ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 7864 } 7865 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7866 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7867 MachinePointerInfo::getConstantPool(), 7868 false, false, false, 16); 7869 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 7870} 7871 7872SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 7873 LLVMContext *Context = DAG.getContext(); 7874 DebugLoc dl = Op.getDebugLoc(); 7875 EVT VT = Op.getValueType(); 7876 EVT EltVT = VT; 7877 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 7878 if (VT.isVector()) { 7879 EltVT = VT.getVectorElementType(); 7880 NumElts = VT.getVectorNumElements(); 7881 } 7882 Constant *C; 7883 if (EltVT == MVT::f64) 7884 C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 7885 else 7886 C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 7887 C = ConstantVector::getSplat(NumElts, C); 7888 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7889 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7890 MachinePointerInfo::getConstantPool(), 7891 false, false, false, 16); 7892 if (VT.isVector()) { 7893 MVT XORVT = VT.getSizeInBits() == 128 ? MVT::v2i64 : MVT::v4i64; 7894 return DAG.getNode(ISD::BITCAST, dl, VT, 7895 DAG.getNode(ISD::XOR, dl, XORVT, 7896 DAG.getNode(ISD::BITCAST, dl, XORVT, 7897 Op.getOperand(0)), 7898 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 7899 } else { 7900 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 7901 } 7902} 7903 7904SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 7905 LLVMContext *Context = DAG.getContext(); 7906 SDValue Op0 = Op.getOperand(0); 7907 SDValue Op1 = Op.getOperand(1); 7908 DebugLoc dl = Op.getDebugLoc(); 7909 EVT VT = Op.getValueType(); 7910 EVT SrcVT = Op1.getValueType(); 7911 7912 // If second operand is smaller, extend it first. 7913 if (SrcVT.bitsLT(VT)) { 7914 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 7915 SrcVT = VT; 7916 } 7917 // And if it is bigger, shrink it first. 7918 if (SrcVT.bitsGT(VT)) { 7919 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 7920 SrcVT = VT; 7921 } 7922 7923 // At this point the operands and the result should have the same 7924 // type, and that won't be f80 since that is not custom lowered. 7925 7926 // First get the sign bit of second operand. 
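  // The overall dataflow is the usual bit-mask form of copysign:
  //   SignBit = Op1 & SignMask;  Val = Op0 & ~SignMask;  result = Val | SignBit
  // with an extra 32-bit logical shift right (FSRL) below when Op1 is f64 and
  // the result is f32, to move the f64 sign bit into the f32 sign position.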
7927 SmallVector<Constant*,4> CV; 7928 if (SrcVT == MVT::f64) { 7929 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 7930 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 7931 } else { 7932 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 7933 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7934 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7935 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7936 } 7937 Constant *C = ConstantVector::get(CV); 7938 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7939 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 7940 MachinePointerInfo::getConstantPool(), 7941 false, false, false, 16); 7942 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 7943 7944 // Shift sign bit right or left if the two operands have different types. 7945 if (SrcVT.bitsGT(VT)) { 7946 // Op0 is MVT::f32, Op1 is MVT::f64. 7947 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 7948 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 7949 DAG.getConstant(32, MVT::i32)); 7950 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 7951 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 7952 DAG.getIntPtrConstant(0)); 7953 } 7954 7955 // Clear first operand sign bit. 7956 CV.clear(); 7957 if (VT == MVT::f64) { 7958 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 7959 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 7960 } else { 7961 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 7962 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7963 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7964 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7965 } 7966 C = ConstantVector::get(CV); 7967 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7968 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7969 MachinePointerInfo::getConstantPool(), 7970 false, false, false, 16); 7971 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 7972 7973 // Or the value with the sign bit. 7974 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 7975} 7976 7977SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const { 7978 SDValue N0 = Op.getOperand(0); 7979 DebugLoc dl = Op.getDebugLoc(); 7980 EVT VT = Op.getValueType(); 7981 7982 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 7983 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 7984 DAG.getConstant(1, VT)); 7985 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 7986} 7987 7988/// Emit nodes that will be selected as "test Op0,Op0", or something 7989/// equivalent. 7990SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 7991 SelectionDAG &DAG) const { 7992 DebugLoc dl = Op.getDebugLoc(); 7993 7994 // CF and OF aren't always set the way we want. Determine which 7995 // of these we need. 
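  // For example, for '(a + b) != 0' we can re-emit the add as the
  // EFLAGS-producing X86ISD::ADD and hand its flag result (value #1) to SETCC
  // instead of emitting a separate TEST. That is only safe when the condition
  // doesn't read CF or OF, since TEST would force those to 0 while the
  // arithmetic node sets them according to the operation.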
7996 bool NeedCF = false; 7997 bool NeedOF = false; 7998 switch (X86CC) { 7999 default: break; 8000 case X86::COND_A: case X86::COND_AE: 8001 case X86::COND_B: case X86::COND_BE: 8002 NeedCF = true; 8003 break; 8004 case X86::COND_G: case X86::COND_GE: 8005 case X86::COND_L: case X86::COND_LE: 8006 case X86::COND_O: case X86::COND_NO: 8007 NeedOF = true; 8008 break; 8009 } 8010 8011 // See if we can use the EFLAGS value from the operand instead of 8012 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 8013 // we prove that the arithmetic won't overflow, we can't use OF or CF. 8014 if (Op.getResNo() != 0 || NeedOF || NeedCF) 8015 // Emit a CMP with 0, which is the TEST pattern. 8016 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8017 DAG.getConstant(0, Op.getValueType())); 8018 8019 unsigned Opcode = 0; 8020 unsigned NumOperands = 0; 8021 switch (Op.getNode()->getOpcode()) { 8022 case ISD::ADD: 8023 // Due to an isel shortcoming, be conservative if this add is likely to be 8024 // selected as part of a load-modify-store instruction. When the root node 8025 // in a match is a store, isel doesn't know how to remap non-chain non-flag 8026 // uses of other nodes in the match, such as the ADD in this case. This 8027 // leads to the ADD being left around and reselected, with the result being 8028 // two adds in the output. Alas, even if none our users are stores, that 8029 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 8030 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 8031 // climbing the DAG back to the root, and it doesn't seem to be worth the 8032 // effort. 8033 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8034 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8035 if (UI->getOpcode() != ISD::CopyToReg && 8036 UI->getOpcode() != ISD::SETCC && 8037 UI->getOpcode() != ISD::STORE) 8038 goto default_case; 8039 8040 if (ConstantSDNode *C = 8041 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) { 8042 // An add of one will be selected as an INC. 8043 if (C->getAPIntValue() == 1) { 8044 Opcode = X86ISD::INC; 8045 NumOperands = 1; 8046 break; 8047 } 8048 8049 // An add of negative one (subtract of one) will be selected as a DEC. 8050 if (C->getAPIntValue().isAllOnesValue()) { 8051 Opcode = X86ISD::DEC; 8052 NumOperands = 1; 8053 break; 8054 } 8055 } 8056 8057 // Otherwise use a regular EFLAGS-setting add. 8058 Opcode = X86ISD::ADD; 8059 NumOperands = 2; 8060 break; 8061 case ISD::AND: { 8062 // If the primary and result isn't used, don't bother using X86ISD::AND, 8063 // because a TEST instruction will be better. 8064 bool NonFlagUse = false; 8065 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8066 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 8067 SDNode *User = *UI; 8068 unsigned UOpNo = UI.getOperandNo(); 8069 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 8070 // Look pass truncate. 8071 UOpNo = User->use_begin().getOperandNo(); 8072 User = *User->use_begin(); 8073 } 8074 8075 if (User->getOpcode() != ISD::BRCOND && 8076 User->getOpcode() != ISD::SETCC && 8077 (User->getOpcode() != ISD::SELECT || UOpNo != 0)) { 8078 NonFlagUse = true; 8079 break; 8080 } 8081 } 8082 8083 if (!NonFlagUse) 8084 break; 8085 } 8086 // FALL THROUGH 8087 case ISD::SUB: 8088 case ISD::OR: 8089 case ISD::XOR: 8090 // Due to the ISEL shortcoming noted above, be conservative if this op is 8091 // likely to be selected as part of a load-modify-store instruction. 
8092 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8093 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8094 if (UI->getOpcode() == ISD::STORE) 8095 goto default_case; 8096 8097 // Otherwise use a regular EFLAGS-setting instruction. 8098 switch (Op.getNode()->getOpcode()) { 8099 default: llvm_unreachable("unexpected operator!"); 8100 case ISD::SUB: Opcode = X86ISD::SUB; break; 8101 case ISD::OR: Opcode = X86ISD::OR; break; 8102 case ISD::XOR: Opcode = X86ISD::XOR; break; 8103 case ISD::AND: Opcode = X86ISD::AND; break; 8104 } 8105 8106 NumOperands = 2; 8107 break; 8108 case X86ISD::ADD: 8109 case X86ISD::SUB: 8110 case X86ISD::INC: 8111 case X86ISD::DEC: 8112 case X86ISD::OR: 8113 case X86ISD::XOR: 8114 case X86ISD::AND: 8115 return SDValue(Op.getNode(), 1); 8116 default: 8117 default_case: 8118 break; 8119 } 8120 8121 if (Opcode == 0) 8122 // Emit a CMP with 0, which is the TEST pattern. 8123 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8124 DAG.getConstant(0, Op.getValueType())); 8125 8126 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 8127 SmallVector<SDValue, 4> Ops; 8128 for (unsigned i = 0; i != NumOperands; ++i) 8129 Ops.push_back(Op.getOperand(i)); 8130 8131 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 8132 DAG.ReplaceAllUsesWith(Op, New); 8133 return SDValue(New.getNode(), 1); 8134} 8135 8136/// Emit nodes that will be selected as "cmp Op0,Op1", or something 8137/// equivalent. 8138SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 8139 SelectionDAG &DAG) const { 8140 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 8141 if (C->getAPIntValue() == 0) 8142 return EmitTest(Op0, X86CC, DAG); 8143 8144 DebugLoc dl = Op0.getDebugLoc(); 8145 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 8146} 8147 8148/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 8149/// if it's possible. 8150SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 8151 DebugLoc dl, SelectionDAG &DAG) const { 8152 SDValue Op0 = And.getOperand(0); 8153 SDValue Op1 = And.getOperand(1); 8154 if (Op0.getOpcode() == ISD::TRUNCATE) 8155 Op0 = Op0.getOperand(0); 8156 if (Op1.getOpcode() == ISD::TRUNCATE) 8157 Op1 = Op1.getOperand(0); 8158 8159 SDValue LHS, RHS; 8160 if (Op1.getOpcode() == ISD::SHL) 8161 std::swap(Op0, Op1); 8162 if (Op0.getOpcode() == ISD::SHL) { 8163 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 8164 if (And00C->getZExtValue() == 1) { 8165 // If we looked past a truncate, check that it's only truncating away 8166 // known zeros. 8167 unsigned BitWidth = Op0.getValueSizeInBits(); 8168 unsigned AndBitWidth = And.getValueSizeInBits(); 8169 if (BitWidth > AndBitWidth) { 8170 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones; 8171 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones); 8172 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 8173 return SDValue(); 8174 } 8175 LHS = Op1; 8176 RHS = Op0.getOperand(1); 8177 } 8178 } else if (Op1.getOpcode() == ISD::Constant) { 8179 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 8180 uint64_t AndRHSVal = AndRHS->getZExtValue(); 8181 SDValue AndLHS = Op0; 8182 8183 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 8184 LHS = AndLHS.getOperand(0); 8185 RHS = AndLHS.getOperand(1); 8186 } 8187 8188 // Use BT if the immediate can't be encoded in a TEST instruction. 
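    // e.g. for i64 'x & (1ULL << 40)' the mask does not fit in test's 32-bit
    // immediate, so testing it would require materializing the constant in a
    // register; 'bt' takes the bit index (40) instead, and the comparison
    // result is read out of CF via the COND_B / COND_AE setcc emitted below.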
8189 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 8190 LHS = AndLHS; 8191 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 8192 } 8193 } 8194 8195 if (LHS.getNode()) { 8196 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 8197 // instruction. Since the shift amount is in-range-or-undefined, we know 8198 // that doing a bittest on the i32 value is ok. We extend to i32 because 8199 // the encoding for the i16 version is larger than the i32 version. 8200 // Also promote i16 to i32 for performance / code size reason. 8201 if (LHS.getValueType() == MVT::i8 || 8202 LHS.getValueType() == MVT::i16) 8203 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 8204 8205 // If the operand types disagree, extend the shift amount to match. Since 8206 // BT ignores high bits (like shifts) we can use anyextend. 8207 if (LHS.getValueType() != RHS.getValueType()) 8208 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 8209 8210 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 8211 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 8212 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8213 DAG.getConstant(Cond, MVT::i8), BT); 8214 } 8215 8216 return SDValue(); 8217} 8218 8219SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 8220 8221 if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG); 8222 8223 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 8224 SDValue Op0 = Op.getOperand(0); 8225 SDValue Op1 = Op.getOperand(1); 8226 DebugLoc dl = Op.getDebugLoc(); 8227 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8228 8229 // Optimize to BT if possible. 8230 // Lower (X & (1 << N)) == 0 to BT(X, N). 8231 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 8232 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 8233 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 8234 Op1.getOpcode() == ISD::Constant && 8235 cast<ConstantSDNode>(Op1)->isNullValue() && 8236 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8237 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 8238 if (NewSetCC.getNode()) 8239 return NewSetCC; 8240 } 8241 8242 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 8243 // these. 8244 if (Op1.getOpcode() == ISD::Constant && 8245 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 8246 cast<ConstantSDNode>(Op1)->isNullValue()) && 8247 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8248 8249 // If the input is a setcc, then reuse the input setcc or use a new one with 8250 // the inverted condition. 8251 if (Op0.getOpcode() == X86ISD::SETCC) { 8252 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 8253 bool Invert = (CC == ISD::SETNE) ^ 8254 cast<ConstantSDNode>(Op1)->isNullValue(); 8255 if (!Invert) return Op0; 8256 8257 CCode = X86::GetOppositeBranchCondition(CCode); 8258 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8259 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 8260 } 8261 } 8262 8263 bool isFP = Op1.getValueType().isFloatingPoint(); 8264 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 8265 if (X86CC == X86::COND_INVALID) 8266 return SDValue(); 8267 8268 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 8269 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8270 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 8271} 8272 8273// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 8274// ones, and then concatenate the result back. 
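// This is needed because AVX without AVX2 provides 256-bit floating-point
// operations but no 256-bit integer compares: PCMPEQ/PCMPGT only exist for
// 128-bit registers, so the 256-bit case is done as two 128-bit halves whose
// results are concatenated (see the !Subtarget->hasAVX2() guard in the caller).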
8275static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 8276 EVT VT = Op.getValueType(); 8277 8278 assert(VT.getSizeInBits() == 256 && Op.getOpcode() == ISD::SETCC && 8279 "Unsupported value type for operation"); 8280 8281 int NumElems = VT.getVectorNumElements(); 8282 DebugLoc dl = Op.getDebugLoc(); 8283 SDValue CC = Op.getOperand(2); 8284 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 8285 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 8286 8287 // Extract the LHS vectors 8288 SDValue LHS = Op.getOperand(0); 8289 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 8290 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 8291 8292 // Extract the RHS vectors 8293 SDValue RHS = Op.getOperand(1); 8294 SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); 8295 SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); 8296 8297 // Issue the operation on the smaller types and concatenate the result back 8298 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 8299 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 8300 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 8301 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 8302 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 8303} 8304 8305 8306SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 8307 SDValue Cond; 8308 SDValue Op0 = Op.getOperand(0); 8309 SDValue Op1 = Op.getOperand(1); 8310 SDValue CC = Op.getOperand(2); 8311 EVT VT = Op.getValueType(); 8312 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 8313 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 8314 DebugLoc dl = Op.getDebugLoc(); 8315 8316 if (isFP) { 8317 unsigned SSECC = 8; 8318 EVT EltVT = Op0.getValueType().getVectorElementType(); 8319 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 8320 8321 bool Swap = false; 8322 8323 // SSE Condition code mapping: 8324 // 0 - EQ 8325 // 1 - LT 8326 // 2 - LE 8327 // 3 - UNORD 8328 // 4 - NEQ 8329 // 5 - NLT 8330 // 6 - NLE 8331 // 7 - ORD 8332 switch (SetCCOpcode) { 8333 default: break; 8334 case ISD::SETOEQ: 8335 case ISD::SETEQ: SSECC = 0; break; 8336 case ISD::SETOGT: 8337 case ISD::SETGT: Swap = true; // Fallthrough 8338 case ISD::SETLT: 8339 case ISD::SETOLT: SSECC = 1; break; 8340 case ISD::SETOGE: 8341 case ISD::SETGE: Swap = true; // Fallthrough 8342 case ISD::SETLE: 8343 case ISD::SETOLE: SSECC = 2; break; 8344 case ISD::SETUO: SSECC = 3; break; 8345 case ISD::SETUNE: 8346 case ISD::SETNE: SSECC = 4; break; 8347 case ISD::SETULE: Swap = true; 8348 case ISD::SETUGE: SSECC = 5; break; 8349 case ISD::SETULT: Swap = true; 8350 case ISD::SETUGT: SSECC = 6; break; 8351 case ISD::SETO: SSECC = 7; break; 8352 } 8353 if (Swap) 8354 std::swap(Op0, Op1); 8355 8356 // In the two special cases we can't handle, emit two comparisons. 8357 if (SSECC == 8) { 8358 if (SetCCOpcode == ISD::SETUEQ) { 8359 SDValue UNORD, EQ; 8360 UNORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8361 DAG.getConstant(3, MVT::i8)); 8362 EQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8363 DAG.getConstant(0, MVT::i8)); 8364 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); 8365 } else if (SetCCOpcode == ISD::SETONE) { 8366 SDValue ORD, NEQ; 8367 ORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8368 DAG.getConstant(7, MVT::i8)); 8369 NEQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8370 DAG.getConstant(4, MVT::i8)); 8371 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ); 8372 } 8373 llvm_unreachable("Illegal FP comparison"); 8374 } 8375 // Handle all other FP comparisons here. 
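    // SSECC becomes the immediate operand of cmpps/cmppd; e.g. a v4f32 SETOLT
    // is emitted as 'cmpltps' (immediate 1), with the greater-than forms
    // already handled above by swapping the operands.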
8376 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 8377 DAG.getConstant(SSECC, MVT::i8)); 8378 } 8379 8380 // Break 256-bit integer vector compare into smaller ones. 8381 if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()) 8382 return Lower256IntVSETCC(Op, DAG); 8383 8384 // We are handling one of the integer comparisons here. Since SSE only has 8385 // GT and EQ comparisons for integer, swapping operands and multiple 8386 // operations may be required for some comparisons. 8387 unsigned Opc = 0; 8388 bool Swap = false, Invert = false, FlipSigns = false; 8389 8390 switch (SetCCOpcode) { 8391 default: break; 8392 case ISD::SETNE: Invert = true; 8393 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 8394 case ISD::SETLT: Swap = true; 8395 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 8396 case ISD::SETGE: Swap = true; 8397 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 8398 case ISD::SETULT: Swap = true; 8399 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 8400 case ISD::SETUGE: Swap = true; 8401 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 8402 } 8403 if (Swap) 8404 std::swap(Op0, Op1); 8405 8406 // Check that the operation in question is available (most are plain SSE2, 8407 // but PCMPGTQ and PCMPEQQ have different requirements). 8408 if (Opc == X86ISD::PCMPGT && VT == MVT::v2i64 && !Subtarget->hasSSE42()) 8409 return SDValue(); 8410 if (Opc == X86ISD::PCMPEQ && VT == MVT::v2i64 && !Subtarget->hasSSE41()) 8411 return SDValue(); 8412 8413 // Since SSE has no unsigned integer comparisons, we need to flip the sign 8414 // bits of the inputs before performing those operations. 8415 if (FlipSigns) { 8416 EVT EltVT = VT.getVectorElementType(); 8417 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 8418 EltVT); 8419 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 8420 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 8421 SignBits.size()); 8422 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 8423 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 8424 } 8425 8426 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 8427 8428 // If the logical-not of the result is required, perform that now. 8429 if (Invert) 8430 Result = DAG.getNOT(dl, Result, VT); 8431 8432 return Result; 8433} 8434 8435// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
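// Here "logical comparison" means a node whose EFLAGS result can be consumed
// directly by SETCC / CMOV / BRCOND: an explicit CMP/COMI/UCOMI, or the flag
// result (value #1, or #2 for UMUL) of one of the EFLAGS-producing arithmetic
// nodes listed below.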
8436static bool isX86LogicalCmp(SDValue Op) { 8437 unsigned Opc = Op.getNode()->getOpcode(); 8438 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) 8439 return true; 8440 if (Op.getResNo() == 1 && 8441 (Opc == X86ISD::ADD || 8442 Opc == X86ISD::SUB || 8443 Opc == X86ISD::ADC || 8444 Opc == X86ISD::SBB || 8445 Opc == X86ISD::SMUL || 8446 Opc == X86ISD::UMUL || 8447 Opc == X86ISD::INC || 8448 Opc == X86ISD::DEC || 8449 Opc == X86ISD::OR || 8450 Opc == X86ISD::XOR || 8451 Opc == X86ISD::AND)) 8452 return true; 8453 8454 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 8455 return true; 8456 8457 return false; 8458} 8459 8460static bool isZero(SDValue V) { 8461 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 8462 return C && C->isNullValue(); 8463} 8464 8465static bool isAllOnes(SDValue V) { 8466 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 8467 return C && C->isAllOnesValue(); 8468} 8469 8470SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 8471 bool addTest = true; 8472 SDValue Cond = Op.getOperand(0); 8473 SDValue Op1 = Op.getOperand(1); 8474 SDValue Op2 = Op.getOperand(2); 8475 DebugLoc DL = Op.getDebugLoc(); 8476 SDValue CC; 8477 8478 if (Cond.getOpcode() == ISD::SETCC) { 8479 SDValue NewCond = LowerSETCC(Cond, DAG); 8480 if (NewCond.getNode()) 8481 Cond = NewCond; 8482 } 8483 8484 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 8485 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 8486 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 8487 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 8488 if (Cond.getOpcode() == X86ISD::SETCC && 8489 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 8490 isZero(Cond.getOperand(1).getOperand(1))) { 8491 SDValue Cmp = Cond.getOperand(1); 8492 8493 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 8494 8495 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 8496 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 8497 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 8498 8499 SDValue CmpOp0 = Cmp.getOperand(0); 8500 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 8501 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 8502 8503 SDValue Res = // Res = 0 or -1. 8504 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 8505 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 8506 8507 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 8508 Res = DAG.getNOT(DL, Res, Res.getValueType()); 8509 8510 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 8511 if (N2C == 0 || !N2C->isNullValue()) 8512 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 8513 return Res; 8514 } 8515 } 8516 8517 // Look past (and (setcc_carry (cmp ...)), 1). 8518 if (Cond.getOpcode() == ISD::AND && 8519 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 8520 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 8521 if (C && C->getAPIntValue() == 1) 8522 Cond = Cond.getOperand(0); 8523 } 8524 8525 // If condition flag is set by a X86ISD::CMP, then use it as the condition 8526 // setting operand in place of the X86ISD::SETCC. 8527 unsigned CondOpcode = Cond.getOpcode(); 8528 if (CondOpcode == X86ISD::SETCC || 8529 CondOpcode == X86ISD::SETCC_CARRY) { 8530 CC = Cond.getOperand(0); 8531 8532 SDValue Cmp = Cond.getOperand(1); 8533 unsigned Opc = Cmp.getOpcode(); 8534 EVT VT = Op.getValueType(); 8535 8536 bool IllegalFPCMov = false; 8537 if (VT.isFloatingPoint() && !VT.isVector() && 8538 !isScalarFPTypeInSSEReg(VT)) // FPStack? 
8539 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 8540 8541 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 8542 Opc == X86ISD::BT) { // FIXME 8543 Cond = Cmp; 8544 addTest = false; 8545 } 8546 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 8547 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 8548 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 8549 Cond.getOperand(0).getValueType() != MVT::i8)) { 8550 SDValue LHS = Cond.getOperand(0); 8551 SDValue RHS = Cond.getOperand(1); 8552 unsigned X86Opcode; 8553 unsigned X86Cond; 8554 SDVTList VTs; 8555 switch (CondOpcode) { 8556 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 8557 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 8558 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 8559 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 8560 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 8561 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 8562 default: llvm_unreachable("unexpected overflowing operator"); 8563 } 8564 if (CondOpcode == ISD::UMULO) 8565 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 8566 MVT::i32); 8567 else 8568 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 8569 8570 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 8571 8572 if (CondOpcode == ISD::UMULO) 8573 Cond = X86Op.getValue(2); 8574 else 8575 Cond = X86Op.getValue(1); 8576 8577 CC = DAG.getConstant(X86Cond, MVT::i8); 8578 addTest = false; 8579 } 8580 8581 if (addTest) { 8582 // Look pass the truncate. 8583 if (Cond.getOpcode() == ISD::TRUNCATE) 8584 Cond = Cond.getOperand(0); 8585 8586 // We know the result of AND is compared against zero. Try to match 8587 // it to BT. 8588 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 8589 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 8590 if (NewSetCC.getNode()) { 8591 CC = NewSetCC.getOperand(0); 8592 Cond = NewSetCC.getOperand(1); 8593 addTest = false; 8594 } 8595 } 8596 } 8597 8598 if (addTest) { 8599 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8600 Cond = EmitTest(Cond, X86::COND_NE, DAG); 8601 } 8602 8603 // a < b ? -1 : 0 -> RES = ~setcc_carry 8604 // a < b ? 0 : -1 -> RES = setcc_carry 8605 // a >= b ? -1 : 0 -> RES = setcc_carry 8606 // a >= b ? 0 : -1 -> RES = ~setcc_carry 8607 if (Cond.getOpcode() == X86ISD::CMP) { 8608 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 8609 8610 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 8611 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 8612 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 8613 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 8614 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 8615 return DAG.getNOT(DL, Res, Res.getValueType()); 8616 return Res; 8617 } 8618 } 8619 8620 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 8621 // condition is true. 8622 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 8623 SDValue Ops[] = { Op2, Op1, CC, Cond }; 8624 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 8625} 8626 8627// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 8628// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 8629// from the AND / OR. 
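// (LowerBRCOND uses this below to turn the AND / OR of two setccs produced by
// FCMP_OEQ / FCMP_UNE into two conditional branches, rather than materializing
// the two i8 setcc values and testing their combined result.)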
8630static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 8631 Opc = Op.getOpcode(); 8632 if (Opc != ISD::OR && Opc != ISD::AND) 8633 return false; 8634 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 8635 Op.getOperand(0).hasOneUse() && 8636 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 8637 Op.getOperand(1).hasOneUse()); 8638} 8639 8640// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 8641// 1 and that the SETCC node has a single use. 8642static bool isXor1OfSetCC(SDValue Op) { 8643 if (Op.getOpcode() != ISD::XOR) 8644 return false; 8645 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 8646 if (N1C && N1C->getAPIntValue() == 1) { 8647 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 8648 Op.getOperand(0).hasOneUse(); 8649 } 8650 return false; 8651} 8652 8653SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 8654 bool addTest = true; 8655 SDValue Chain = Op.getOperand(0); 8656 SDValue Cond = Op.getOperand(1); 8657 SDValue Dest = Op.getOperand(2); 8658 DebugLoc dl = Op.getDebugLoc(); 8659 SDValue CC; 8660 bool Inverted = false; 8661 8662 if (Cond.getOpcode() == ISD::SETCC) { 8663 // Check for setcc([su]{add,sub,mul}o == 0). 8664 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 8665 isa<ConstantSDNode>(Cond.getOperand(1)) && 8666 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 8667 Cond.getOperand(0).getResNo() == 1 && 8668 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 8669 Cond.getOperand(0).getOpcode() == ISD::UADDO || 8670 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 8671 Cond.getOperand(0).getOpcode() == ISD::USUBO || 8672 Cond.getOperand(0).getOpcode() == ISD::SMULO || 8673 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 8674 Inverted = true; 8675 Cond = Cond.getOperand(0); 8676 } else { 8677 SDValue NewCond = LowerSETCC(Cond, DAG); 8678 if (NewCond.getNode()) 8679 Cond = NewCond; 8680 } 8681 } 8682#if 0 8683 // FIXME: LowerXALUO doesn't handle these!! 8684 else if (Cond.getOpcode() == X86ISD::ADD || 8685 Cond.getOpcode() == X86ISD::SUB || 8686 Cond.getOpcode() == X86ISD::SMUL || 8687 Cond.getOpcode() == X86ISD::UMUL) 8688 Cond = LowerXALUO(Cond, DAG); 8689#endif 8690 8691 // Look pass (and (setcc_carry (cmp ...)), 1). 8692 if (Cond.getOpcode() == ISD::AND && 8693 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 8694 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 8695 if (C && C->getAPIntValue() == 1) 8696 Cond = Cond.getOperand(0); 8697 } 8698 8699 // If condition flag is set by a X86ISD::CMP, then use it as the condition 8700 // setting operand in place of the X86ISD::SETCC. 8701 unsigned CondOpcode = Cond.getOpcode(); 8702 if (CondOpcode == X86ISD::SETCC || 8703 CondOpcode == X86ISD::SETCC_CARRY) { 8704 CC = Cond.getOperand(0); 8705 8706 SDValue Cmp = Cond.getOperand(1); 8707 unsigned Opc = Cmp.getOpcode(); 8708 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 8709 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 8710 Cond = Cmp; 8711 addTest = false; 8712 } else { 8713 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 8714 default: break; 8715 case X86::COND_O: 8716 case X86::COND_B: 8717 // These can only come from an arithmetic instruction with overflow, 8718 // e.g. SADDO, UADDO. 
8719 Cond = Cond.getNode()->getOperand(1); 8720 addTest = false; 8721 break; 8722 } 8723 } 8724 } 8725 CondOpcode = Cond.getOpcode(); 8726 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 8727 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 8728 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 8729 Cond.getOperand(0).getValueType() != MVT::i8)) { 8730 SDValue LHS = Cond.getOperand(0); 8731 SDValue RHS = Cond.getOperand(1); 8732 unsigned X86Opcode; 8733 unsigned X86Cond; 8734 SDVTList VTs; 8735 switch (CondOpcode) { 8736 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 8737 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 8738 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 8739 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 8740 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 8741 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 8742 default: llvm_unreachable("unexpected overflowing operator"); 8743 } 8744 if (Inverted) 8745 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 8746 if (CondOpcode == ISD::UMULO) 8747 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 8748 MVT::i32); 8749 else 8750 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 8751 8752 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 8753 8754 if (CondOpcode == ISD::UMULO) 8755 Cond = X86Op.getValue(2); 8756 else 8757 Cond = X86Op.getValue(1); 8758 8759 CC = DAG.getConstant(X86Cond, MVT::i8); 8760 addTest = false; 8761 } else { 8762 unsigned CondOpc; 8763 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 8764 SDValue Cmp = Cond.getOperand(0).getOperand(1); 8765 if (CondOpc == ISD::OR) { 8766 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 8767 // two branches instead of an explicit OR instruction with a 8768 // separate test. 8769 if (Cmp == Cond.getOperand(1).getOperand(1) && 8770 isX86LogicalCmp(Cmp)) { 8771 CC = Cond.getOperand(0).getOperand(0); 8772 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8773 Chain, Dest, CC, Cmp); 8774 CC = Cond.getOperand(1).getOperand(0); 8775 Cond = Cmp; 8776 addTest = false; 8777 } 8778 } else { // ISD::AND 8779 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 8780 // two branches instead of an explicit AND instruction with a 8781 // separate test. However, we only do this if this block doesn't 8782 // have a fall-through edge, because this requires an explicit 8783 // jmp when the condition is false. 8784 if (Cmp == Cond.getOperand(1).getOperand(1) && 8785 isX86LogicalCmp(Cmp) && 8786 Op.getNode()->hasOneUse()) { 8787 X86::CondCode CCode = 8788 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 8789 CCode = X86::GetOppositeBranchCondition(CCode); 8790 CC = DAG.getConstant(CCode, MVT::i8); 8791 SDNode *User = *Op.getNode()->use_begin(); 8792 // Look for an unconditional branch following this conditional branch. 8793 // We need this because we need to reverse the successors in order 8794 // to implement FCMP_OEQ. 
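          // (After ucomis*, "ordered and equal" means ZF==1 && PF==0, which no
          // single condition code can test; hence the pair of branches to the
          // false block with the successors reversed.)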
8795 if (User->getOpcode() == ISD::BR) { 8796 SDValue FalseBB = User->getOperand(1); 8797 SDNode *NewBR = 8798 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8799 assert(NewBR == User); 8800 (void)NewBR; 8801 Dest = FalseBB; 8802 8803 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8804 Chain, Dest, CC, Cmp); 8805 X86::CondCode CCode = 8806 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 8807 CCode = X86::GetOppositeBranchCondition(CCode); 8808 CC = DAG.getConstant(CCode, MVT::i8); 8809 Cond = Cmp; 8810 addTest = false; 8811 } 8812 } 8813 } 8814 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 8815 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 8816 // It should be transformed during dag combiner except when the condition 8817 // is set by a arithmetics with overflow node. 8818 X86::CondCode CCode = 8819 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 8820 CCode = X86::GetOppositeBranchCondition(CCode); 8821 CC = DAG.getConstant(CCode, MVT::i8); 8822 Cond = Cond.getOperand(0).getOperand(1); 8823 addTest = false; 8824 } else if (Cond.getOpcode() == ISD::SETCC && 8825 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 8826 // For FCMP_OEQ, we can emit 8827 // two branches instead of an explicit AND instruction with a 8828 // separate test. However, we only do this if this block doesn't 8829 // have a fall-through edge, because this requires an explicit 8830 // jmp when the condition is false. 8831 if (Op.getNode()->hasOneUse()) { 8832 SDNode *User = *Op.getNode()->use_begin(); 8833 // Look for an unconditional branch following this conditional branch. 8834 // We need this because we need to reverse the successors in order 8835 // to implement FCMP_OEQ. 8836 if (User->getOpcode() == ISD::BR) { 8837 SDValue FalseBB = User->getOperand(1); 8838 SDNode *NewBR = 8839 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8840 assert(NewBR == User); 8841 (void)NewBR; 8842 Dest = FalseBB; 8843 8844 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 8845 Cond.getOperand(0), Cond.getOperand(1)); 8846 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8847 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8848 Chain, Dest, CC, Cmp); 8849 CC = DAG.getConstant(X86::COND_P, MVT::i8); 8850 Cond = Cmp; 8851 addTest = false; 8852 } 8853 } 8854 } else if (Cond.getOpcode() == ISD::SETCC && 8855 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 8856 // For FCMP_UNE, we can emit 8857 // two branches instead of an explicit AND instruction with a 8858 // separate test. However, we only do this if this block doesn't 8859 // have a fall-through edge, because this requires an explicit 8860 // jmp when the condition is false. 8861 if (Op.getNode()->hasOneUse()) { 8862 SDNode *User = *Op.getNode()->use_begin(); 8863 // Look for an unconditional branch following this conditional branch. 8864 // We need this because we need to reverse the successors in order 8865 // to implement FCMP_UNE. 
8866 if (User->getOpcode() == ISD::BR) { 8867 SDValue FalseBB = User->getOperand(1); 8868 SDNode *NewBR = 8869 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8870 assert(NewBR == User); 8871 (void)NewBR; 8872 8873 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 8874 Cond.getOperand(0), Cond.getOperand(1)); 8875 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8876 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8877 Chain, Dest, CC, Cmp); 8878 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 8879 Cond = Cmp; 8880 addTest = false; 8881 Dest = FalseBB; 8882 } 8883 } 8884 } 8885 } 8886 8887 if (addTest) { 8888 // Look pass the truncate. 8889 if (Cond.getOpcode() == ISD::TRUNCATE) 8890 Cond = Cond.getOperand(0); 8891 8892 // We know the result of AND is compared against zero. Try to match 8893 // it to BT. 8894 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 8895 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 8896 if (NewSetCC.getNode()) { 8897 CC = NewSetCC.getOperand(0); 8898 Cond = NewSetCC.getOperand(1); 8899 addTest = false; 8900 } 8901 } 8902 } 8903 8904 if (addTest) { 8905 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8906 Cond = EmitTest(Cond, X86::COND_NE, DAG); 8907 } 8908 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8909 Chain, Dest, CC, Cond); 8910} 8911 8912 8913// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 8914// Calls to _alloca is needed to probe the stack when allocating more than 4k 8915// bytes in one go. Touching the stack at 4K increments is necessary to ensure 8916// that the guard pages used by the OS virtual memory manager are allocated in 8917// correct sequence. 8918SDValue 8919X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 8920 SelectionDAG &DAG) const { 8921 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 8922 getTargetMachine().Options.EnableSegmentedStacks) && 8923 "This should be used only on Windows targets or when segmented stacks " 8924 "are being used"); 8925 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 8926 DebugLoc dl = Op.getDebugLoc(); 8927 8928 // Get the inputs. 8929 SDValue Chain = Op.getOperand(0); 8930 SDValue Size = Op.getOperand(1); 8931 // FIXME: Ensure alignment here 8932 8933 bool Is64Bit = Subtarget->is64Bit(); 8934 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 8935 8936 if (getTargetMachine().Options.EnableSegmentedStacks) { 8937 MachineFunction &MF = DAG.getMachineFunction(); 8938 MachineRegisterInfo &MRI = MF.getRegInfo(); 8939 8940 if (Is64Bit) { 8941 // The 64 bit implementation of segmented stacks needs to clobber both r10 8942 // r11. This makes it impossible to use it along with nested parameters. 8943 const Function *F = MF.getFunction(); 8944 8945 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 8946 I != E; I++) 8947 if (I->hasNestAttr()) 8948 report_fatal_error("Cannot use segmented stacks with functions that " 8949 "have nested arguments."); 8950 } 8951 8952 const TargetRegisterClass *AddrRegClass = 8953 getRegClassFor(Subtarget->is64Bit() ? MVT::i64:MVT::i32); 8954 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 8955 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 8956 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 8957 DAG.getRegister(Vreg, SPTy)); 8958 SDValue Ops1[2] = { Value, Chain }; 8959 return DAG.getMergeValues(Ops1, 2, dl); 8960 } else { 8961 SDValue Flag; 8962 unsigned Reg = (Subtarget->is64Bit() ? 
X86::RAX : X86::EAX); 8963 8964 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 8965 Flag = Chain.getValue(1); 8966 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8967 8968 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 8969 Flag = Chain.getValue(1); 8970 8971 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); 8972 8973 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 8974 return DAG.getMergeValues(Ops1, 2, dl); 8975 } 8976} 8977 8978SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 8979 MachineFunction &MF = DAG.getMachineFunction(); 8980 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 8981 8982 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 8983 DebugLoc DL = Op.getDebugLoc(); 8984 8985 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 8986 // vastart just stores the address of the VarArgsFrameIndex slot into the 8987 // memory location argument. 8988 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 8989 getPointerTy()); 8990 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 8991 MachinePointerInfo(SV), false, false, 0); 8992 } 8993 8994 // __va_list_tag: 8995 // gp_offset (0 - 6 * 8) 8996 // fp_offset (48 - 48 + 8 * 16) 8997 // overflow_arg_area (point to parameters coming in memory). 8998 // reg_save_area 8999 SmallVector<SDValue, 8> MemOps; 9000 SDValue FIN = Op.getOperand(1); 9001 // Store gp_offset 9002 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 9003 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 9004 MVT::i32), 9005 FIN, MachinePointerInfo(SV), false, false, 0); 9006 MemOps.push_back(Store); 9007 9008 // Store fp_offset 9009 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9010 FIN, DAG.getIntPtrConstant(4)); 9011 Store = DAG.getStore(Op.getOperand(0), DL, 9012 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 9013 MVT::i32), 9014 FIN, MachinePointerInfo(SV, 4), false, false, 0); 9015 MemOps.push_back(Store); 9016 9017 // Store ptr to overflow_arg_area 9018 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9019 FIN, DAG.getIntPtrConstant(4)); 9020 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9021 getPointerTy()); 9022 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 9023 MachinePointerInfo(SV, 8), 9024 false, false, 0); 9025 MemOps.push_back(Store); 9026 9027 // Store ptr to reg_save_area. 
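  // (reg_save_area sits at byte offset 16 of the AMD64 __va_list_tag, after
  // the two i32 offsets and the i8* overflow_arg_area pointer; hence the +8
  // step from the previous field and the MachinePointerInfo(SV, 16) below.)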
9028 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9029 FIN, DAG.getIntPtrConstant(8)); 9030 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 9031 getPointerTy()); 9032 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 9033 MachinePointerInfo(SV, 16), false, false, 0); 9034 MemOps.push_back(Store); 9035 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 9036 &MemOps[0], MemOps.size()); 9037} 9038 9039SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 9040 assert(Subtarget->is64Bit() && 9041 "LowerVAARG only handles 64-bit va_arg!"); 9042 assert((Subtarget->isTargetLinux() || 9043 Subtarget->isTargetDarwin()) && 9044 "Unhandled target in LowerVAARG"); 9045 assert(Op.getNode()->getNumOperands() == 4); 9046 SDValue Chain = Op.getOperand(0); 9047 SDValue SrcPtr = Op.getOperand(1); 9048 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9049 unsigned Align = Op.getConstantOperandVal(3); 9050 DebugLoc dl = Op.getDebugLoc(); 9051 9052 EVT ArgVT = Op.getNode()->getValueType(0); 9053 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9054 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 9055 uint8_t ArgMode; 9056 9057 // Decide which area this value should be read from. 9058 // TODO: Implement the AMD64 ABI in its entirety. This simple 9059 // selection mechanism works only for the basic types. 9060 if (ArgVT == MVT::f80) { 9061 llvm_unreachable("va_arg for f80 not yet implemented"); 9062 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 9063 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 9064 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 9065 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 9066 } else { 9067 llvm_unreachable("Unhandled argument type in LowerVAARG"); 9068 } 9069 9070 if (ArgMode == 2) { 9071 // Sanity Check: Make sure using fp_offset makes sense. 9072 assert(!getTargetMachine().Options.UseSoftFloat && 9073 !(DAG.getMachineFunction() 9074 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 9075 Subtarget->hasSSE1()); 9076 } 9077 9078 // Insert VAARG_64 node into the DAG 9079 // VAARG_64 returns two values: Variable Argument Address, Chain 9080 SmallVector<SDValue, 11> InstOps; 9081 InstOps.push_back(Chain); 9082 InstOps.push_back(SrcPtr); 9083 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 9084 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 9085 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 9086 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 9087 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 9088 VTs, &InstOps[0], InstOps.size(), 9089 MVT::i64, 9090 MachinePointerInfo(SV), 9091 /*Align=*/0, 9092 /*Volatile=*/false, 9093 /*ReadMem=*/true, 9094 /*WriteMem=*/true); 9095 Chain = VAARG.getValue(1); 9096 9097 // Load the next argument and return it 9098 return DAG.getLoad(ArgVT, dl, 9099 Chain, 9100 VAARG, 9101 MachinePointerInfo(), 9102 false, false, false, 0); 9103} 9104 9105SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 9106 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
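  // That is 4 + 4 + 8 + 8 = 24 bytes with 8-byte alignment, which is why the
  // whole va_list can be copied with the single 24-byte memcpy emitted below.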
9107 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 9108 SDValue Chain = Op.getOperand(0); 9109 SDValue DstPtr = Op.getOperand(1); 9110 SDValue SrcPtr = Op.getOperand(2); 9111 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 9112 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9113 DebugLoc DL = Op.getDebugLoc(); 9114 9115 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 9116 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 9117 false, 9118 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 9119} 9120 9121// getTargetVShiftNOde - Handle vector element shifts where the shift amount 9122// may or may not be a constant. Takes immediate version of shift as input. 9123static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, 9124 SDValue SrcOp, SDValue ShAmt, 9125 SelectionDAG &DAG) { 9126 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 9127 9128 if (isa<ConstantSDNode>(ShAmt)) { 9129 switch (Opc) { 9130 default: llvm_unreachable("Unknown target vector shift node"); 9131 case X86ISD::VSHLI: 9132 case X86ISD::VSRLI: 9133 case X86ISD::VSRAI: 9134 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 9135 } 9136 } 9137 9138 // Change opcode to non-immediate version 9139 switch (Opc) { 9140 default: llvm_unreachable("Unknown target vector shift node"); 9141 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 9142 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 9143 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 9144 } 9145 9146 // Need to build a vector containing shift amount 9147 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 9148 SDValue ShOps[4]; 9149 ShOps[0] = ShAmt; 9150 ShOps[1] = DAG.getConstant(0, MVT::i32); 9151 ShOps[2] = DAG.getUNDEF(MVT::i32); 9152 ShOps[3] = DAG.getUNDEF(MVT::i32); 9153 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 9154 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); 9155 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 9156} 9157 9158SDValue 9159X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 9160 DebugLoc dl = Op.getDebugLoc(); 9161 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9162 switch (IntNo) { 9163 default: return SDValue(); // Don't custom lower most intrinsics. 9164 // Comparison intrinsics. 9165 case Intrinsic::x86_sse_comieq_ss: 9166 case Intrinsic::x86_sse_comilt_ss: 9167 case Intrinsic::x86_sse_comile_ss: 9168 case Intrinsic::x86_sse_comigt_ss: 9169 case Intrinsic::x86_sse_comige_ss: 9170 case Intrinsic::x86_sse_comineq_ss: 9171 case Intrinsic::x86_sse_ucomieq_ss: 9172 case Intrinsic::x86_sse_ucomilt_ss: 9173 case Intrinsic::x86_sse_ucomile_ss: 9174 case Intrinsic::x86_sse_ucomigt_ss: 9175 case Intrinsic::x86_sse_ucomige_ss: 9176 case Intrinsic::x86_sse_ucomineq_ss: 9177 case Intrinsic::x86_sse2_comieq_sd: 9178 case Intrinsic::x86_sse2_comilt_sd: 9179 case Intrinsic::x86_sse2_comile_sd: 9180 case Intrinsic::x86_sse2_comigt_sd: 9181 case Intrinsic::x86_sse2_comige_sd: 9182 case Intrinsic::x86_sse2_comineq_sd: 9183 case Intrinsic::x86_sse2_ucomieq_sd: 9184 case Intrinsic::x86_sse2_ucomilt_sd: 9185 case Intrinsic::x86_sse2_ucomile_sd: 9186 case Intrinsic::x86_sse2_ucomigt_sd: 9187 case Intrinsic::x86_sse2_ucomige_sd: 9188 case Intrinsic::x86_sse2_ucomineq_sd: { 9189 unsigned Opc = 0; 9190 ISD::CondCode CC = ISD::SETCC_INVALID; 9191 switch (IntNo) { 9192 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
9193 case Intrinsic::x86_sse_comieq_ss: 9194 case Intrinsic::x86_sse2_comieq_sd: 9195 Opc = X86ISD::COMI; 9196 CC = ISD::SETEQ; 9197 break; 9198 case Intrinsic::x86_sse_comilt_ss: 9199 case Intrinsic::x86_sse2_comilt_sd: 9200 Opc = X86ISD::COMI; 9201 CC = ISD::SETLT; 9202 break; 9203 case Intrinsic::x86_sse_comile_ss: 9204 case Intrinsic::x86_sse2_comile_sd: 9205 Opc = X86ISD::COMI; 9206 CC = ISD::SETLE; 9207 break; 9208 case Intrinsic::x86_sse_comigt_ss: 9209 case Intrinsic::x86_sse2_comigt_sd: 9210 Opc = X86ISD::COMI; 9211 CC = ISD::SETGT; 9212 break; 9213 case Intrinsic::x86_sse_comige_ss: 9214 case Intrinsic::x86_sse2_comige_sd: 9215 Opc = X86ISD::COMI; 9216 CC = ISD::SETGE; 9217 break; 9218 case Intrinsic::x86_sse_comineq_ss: 9219 case Intrinsic::x86_sse2_comineq_sd: 9220 Opc = X86ISD::COMI; 9221 CC = ISD::SETNE; 9222 break; 9223 case Intrinsic::x86_sse_ucomieq_ss: 9224 case Intrinsic::x86_sse2_ucomieq_sd: 9225 Opc = X86ISD::UCOMI; 9226 CC = ISD::SETEQ; 9227 break; 9228 case Intrinsic::x86_sse_ucomilt_ss: 9229 case Intrinsic::x86_sse2_ucomilt_sd: 9230 Opc = X86ISD::UCOMI; 9231 CC = ISD::SETLT; 9232 break; 9233 case Intrinsic::x86_sse_ucomile_ss: 9234 case Intrinsic::x86_sse2_ucomile_sd: 9235 Opc = X86ISD::UCOMI; 9236 CC = ISD::SETLE; 9237 break; 9238 case Intrinsic::x86_sse_ucomigt_ss: 9239 case Intrinsic::x86_sse2_ucomigt_sd: 9240 Opc = X86ISD::UCOMI; 9241 CC = ISD::SETGT; 9242 break; 9243 case Intrinsic::x86_sse_ucomige_ss: 9244 case Intrinsic::x86_sse2_ucomige_sd: 9245 Opc = X86ISD::UCOMI; 9246 CC = ISD::SETGE; 9247 break; 9248 case Intrinsic::x86_sse_ucomineq_ss: 9249 case Intrinsic::x86_sse2_ucomineq_sd: 9250 Opc = X86ISD::UCOMI; 9251 CC = ISD::SETNE; 9252 break; 9253 } 9254 9255 SDValue LHS = Op.getOperand(1); 9256 SDValue RHS = Op.getOperand(2); 9257 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 9258 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 9259 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 9260 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9261 DAG.getConstant(X86CC, MVT::i8), Cond); 9262 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9263 } 9264 // XOP comparison intrinsics 9265 case Intrinsic::x86_xop_vpcomltb: 9266 case Intrinsic::x86_xop_vpcomltw: 9267 case Intrinsic::x86_xop_vpcomltd: 9268 case Intrinsic::x86_xop_vpcomltq: 9269 case Intrinsic::x86_xop_vpcomltub: 9270 case Intrinsic::x86_xop_vpcomltuw: 9271 case Intrinsic::x86_xop_vpcomltud: 9272 case Intrinsic::x86_xop_vpcomltuq: 9273 case Intrinsic::x86_xop_vpcomleb: 9274 case Intrinsic::x86_xop_vpcomlew: 9275 case Intrinsic::x86_xop_vpcomled: 9276 case Intrinsic::x86_xop_vpcomleq: 9277 case Intrinsic::x86_xop_vpcomleub: 9278 case Intrinsic::x86_xop_vpcomleuw: 9279 case Intrinsic::x86_xop_vpcomleud: 9280 case Intrinsic::x86_xop_vpcomleuq: 9281 case Intrinsic::x86_xop_vpcomgtb: 9282 case Intrinsic::x86_xop_vpcomgtw: 9283 case Intrinsic::x86_xop_vpcomgtd: 9284 case Intrinsic::x86_xop_vpcomgtq: 9285 case Intrinsic::x86_xop_vpcomgtub: 9286 case Intrinsic::x86_xop_vpcomgtuw: 9287 case Intrinsic::x86_xop_vpcomgtud: 9288 case Intrinsic::x86_xop_vpcomgtuq: 9289 case Intrinsic::x86_xop_vpcomgeb: 9290 case Intrinsic::x86_xop_vpcomgew: 9291 case Intrinsic::x86_xop_vpcomged: 9292 case Intrinsic::x86_xop_vpcomgeq: 9293 case Intrinsic::x86_xop_vpcomgeub: 9294 case Intrinsic::x86_xop_vpcomgeuw: 9295 case Intrinsic::x86_xop_vpcomgeud: 9296 case Intrinsic::x86_xop_vpcomgeuq: 9297 case Intrinsic::x86_xop_vpcomeqb: 9298 case Intrinsic::x86_xop_vpcomeqw: 9299 case 
Intrinsic::x86_xop_vpcomeqd: 9300 case Intrinsic::x86_xop_vpcomeqq: 9301 case Intrinsic::x86_xop_vpcomequb: 9302 case Intrinsic::x86_xop_vpcomequw: 9303 case Intrinsic::x86_xop_vpcomequd: 9304 case Intrinsic::x86_xop_vpcomequq: 9305 case Intrinsic::x86_xop_vpcomneb: 9306 case Intrinsic::x86_xop_vpcomnew: 9307 case Intrinsic::x86_xop_vpcomned: 9308 case Intrinsic::x86_xop_vpcomneq: 9309 case Intrinsic::x86_xop_vpcomneub: 9310 case Intrinsic::x86_xop_vpcomneuw: 9311 case Intrinsic::x86_xop_vpcomneud: 9312 case Intrinsic::x86_xop_vpcomneuq: 9313 case Intrinsic::x86_xop_vpcomfalseb: 9314 case Intrinsic::x86_xop_vpcomfalsew: 9315 case Intrinsic::x86_xop_vpcomfalsed: 9316 case Intrinsic::x86_xop_vpcomfalseq: 9317 case Intrinsic::x86_xop_vpcomfalseub: 9318 case Intrinsic::x86_xop_vpcomfalseuw: 9319 case Intrinsic::x86_xop_vpcomfalseud: 9320 case Intrinsic::x86_xop_vpcomfalseuq: 9321 case Intrinsic::x86_xop_vpcomtrueb: 9322 case Intrinsic::x86_xop_vpcomtruew: 9323 case Intrinsic::x86_xop_vpcomtrued: 9324 case Intrinsic::x86_xop_vpcomtrueq: 9325 case Intrinsic::x86_xop_vpcomtrueub: 9326 case Intrinsic::x86_xop_vpcomtrueuw: 9327 case Intrinsic::x86_xop_vpcomtrueud: 9328 case Intrinsic::x86_xop_vpcomtrueuq: { 9329 unsigned CC = 0; 9330 unsigned Opc = 0; 9331 9332 switch (IntNo) { 9333 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 9334 case Intrinsic::x86_xop_vpcomltb: 9335 case Intrinsic::x86_xop_vpcomltw: 9336 case Intrinsic::x86_xop_vpcomltd: 9337 case Intrinsic::x86_xop_vpcomltq: 9338 CC = 0; 9339 Opc = X86ISD::VPCOM; 9340 break; 9341 case Intrinsic::x86_xop_vpcomltub: 9342 case Intrinsic::x86_xop_vpcomltuw: 9343 case Intrinsic::x86_xop_vpcomltud: 9344 case Intrinsic::x86_xop_vpcomltuq: 9345 CC = 0; 9346 Opc = X86ISD::VPCOMU; 9347 break; 9348 case Intrinsic::x86_xop_vpcomleb: 9349 case Intrinsic::x86_xop_vpcomlew: 9350 case Intrinsic::x86_xop_vpcomled: 9351 case Intrinsic::x86_xop_vpcomleq: 9352 CC = 1; 9353 Opc = X86ISD::VPCOM; 9354 break; 9355 case Intrinsic::x86_xop_vpcomleub: 9356 case Intrinsic::x86_xop_vpcomleuw: 9357 case Intrinsic::x86_xop_vpcomleud: 9358 case Intrinsic::x86_xop_vpcomleuq: 9359 CC = 1; 9360 Opc = X86ISD::VPCOMU; 9361 break; 9362 case Intrinsic::x86_xop_vpcomgtb: 9363 case Intrinsic::x86_xop_vpcomgtw: 9364 case Intrinsic::x86_xop_vpcomgtd: 9365 case Intrinsic::x86_xop_vpcomgtq: 9366 CC = 2; 9367 Opc = X86ISD::VPCOM; 9368 break; 9369 case Intrinsic::x86_xop_vpcomgtub: 9370 case Intrinsic::x86_xop_vpcomgtuw: 9371 case Intrinsic::x86_xop_vpcomgtud: 9372 case Intrinsic::x86_xop_vpcomgtuq: 9373 CC = 2; 9374 Opc = X86ISD::VPCOMU; 9375 break; 9376 case Intrinsic::x86_xop_vpcomgeb: 9377 case Intrinsic::x86_xop_vpcomgew: 9378 case Intrinsic::x86_xop_vpcomged: 9379 case Intrinsic::x86_xop_vpcomgeq: 9380 CC = 3; 9381 Opc = X86ISD::VPCOM; 9382 break; 9383 case Intrinsic::x86_xop_vpcomgeub: 9384 case Intrinsic::x86_xop_vpcomgeuw: 9385 case Intrinsic::x86_xop_vpcomgeud: 9386 case Intrinsic::x86_xop_vpcomgeuq: 9387 CC = 3; 9388 Opc = X86ISD::VPCOMU; 9389 break; 9390 case Intrinsic::x86_xop_vpcomeqb: 9391 case Intrinsic::x86_xop_vpcomeqw: 9392 case Intrinsic::x86_xop_vpcomeqd: 9393 case Intrinsic::x86_xop_vpcomeqq: 9394 CC = 4; 9395 Opc = X86ISD::VPCOM; 9396 break; 9397 case Intrinsic::x86_xop_vpcomequb: 9398 case Intrinsic::x86_xop_vpcomequw: 9399 case Intrinsic::x86_xop_vpcomequd: 9400 case Intrinsic::x86_xop_vpcomequq: 9401 CC = 4; 9402 Opc = X86ISD::VPCOMU; 9403 break; 9404 case Intrinsic::x86_xop_vpcomneb: 9405 case Intrinsic::x86_xop_vpcomnew: 9406 case 
Intrinsic::x86_xop_vpcomned: 9407 case Intrinsic::x86_xop_vpcomneq: 9408 CC = 5; 9409 Opc = X86ISD::VPCOM; 9410 break; 9411 case Intrinsic::x86_xop_vpcomneub: 9412 case Intrinsic::x86_xop_vpcomneuw: 9413 case Intrinsic::x86_xop_vpcomneud: 9414 case Intrinsic::x86_xop_vpcomneuq: 9415 CC = 5; 9416 Opc = X86ISD::VPCOMU; 9417 break; 9418 case Intrinsic::x86_xop_vpcomfalseb: 9419 case Intrinsic::x86_xop_vpcomfalsew: 9420 case Intrinsic::x86_xop_vpcomfalsed: 9421 case Intrinsic::x86_xop_vpcomfalseq: 9422 CC = 6; 9423 Opc = X86ISD::VPCOM; 9424 break; 9425 case Intrinsic::x86_xop_vpcomfalseub: 9426 case Intrinsic::x86_xop_vpcomfalseuw: 9427 case Intrinsic::x86_xop_vpcomfalseud: 9428 case Intrinsic::x86_xop_vpcomfalseuq: 9429 CC = 6; 9430 Opc = X86ISD::VPCOMU; 9431 break; 9432 case Intrinsic::x86_xop_vpcomtrueb: 9433 case Intrinsic::x86_xop_vpcomtruew: 9434 case Intrinsic::x86_xop_vpcomtrued: 9435 case Intrinsic::x86_xop_vpcomtrueq: 9436 CC = 7; 9437 Opc = X86ISD::VPCOM; 9438 break; 9439 case Intrinsic::x86_xop_vpcomtrueub: 9440 case Intrinsic::x86_xop_vpcomtrueuw: 9441 case Intrinsic::x86_xop_vpcomtrueud: 9442 case Intrinsic::x86_xop_vpcomtrueuq: 9443 CC = 7; 9444 Opc = X86ISD::VPCOMU; 9445 break; 9446 } 9447 9448 SDValue LHS = Op.getOperand(1); 9449 SDValue RHS = Op.getOperand(2); 9450 return DAG.getNode(Opc, dl, Op.getValueType(), LHS, RHS, 9451 DAG.getConstant(CC, MVT::i8)); 9452 } 9453 9454 // Arithmetic intrinsics. 9455 case Intrinsic::x86_sse3_hadd_ps: 9456 case Intrinsic::x86_sse3_hadd_pd: 9457 case Intrinsic::x86_avx_hadd_ps_256: 9458 case Intrinsic::x86_avx_hadd_pd_256: 9459 return DAG.getNode(X86ISD::FHADD, dl, Op.getValueType(), 9460 Op.getOperand(1), Op.getOperand(2)); 9461 case Intrinsic::x86_sse3_hsub_ps: 9462 case Intrinsic::x86_sse3_hsub_pd: 9463 case Intrinsic::x86_avx_hsub_ps_256: 9464 case Intrinsic::x86_avx_hsub_pd_256: 9465 return DAG.getNode(X86ISD::FHSUB, dl, Op.getValueType(), 9466 Op.getOperand(1), Op.getOperand(2)); 9467 case Intrinsic::x86_ssse3_phadd_w_128: 9468 case Intrinsic::x86_ssse3_phadd_d_128: 9469 case Intrinsic::x86_avx2_phadd_w: 9470 case Intrinsic::x86_avx2_phadd_d: 9471 return DAG.getNode(X86ISD::HADD, dl, Op.getValueType(), 9472 Op.getOperand(1), Op.getOperand(2)); 9473 case Intrinsic::x86_ssse3_phsub_w_128: 9474 case Intrinsic::x86_ssse3_phsub_d_128: 9475 case Intrinsic::x86_avx2_phsub_w: 9476 case Intrinsic::x86_avx2_phsub_d: 9477 return DAG.getNode(X86ISD::HSUB, dl, Op.getValueType(), 9478 Op.getOperand(1), Op.getOperand(2)); 9479 case Intrinsic::x86_avx2_psllv_d: 9480 case Intrinsic::x86_avx2_psllv_q: 9481 case Intrinsic::x86_avx2_psllv_d_256: 9482 case Intrinsic::x86_avx2_psllv_q_256: 9483 return DAG.getNode(ISD::SHL, dl, Op.getValueType(), 9484 Op.getOperand(1), Op.getOperand(2)); 9485 case Intrinsic::x86_avx2_psrlv_d: 9486 case Intrinsic::x86_avx2_psrlv_q: 9487 case Intrinsic::x86_avx2_psrlv_d_256: 9488 case Intrinsic::x86_avx2_psrlv_q_256: 9489 return DAG.getNode(ISD::SRL, dl, Op.getValueType(), 9490 Op.getOperand(1), Op.getOperand(2)); 9491 case Intrinsic::x86_avx2_psrav_d: 9492 case Intrinsic::x86_avx2_psrav_d_256: 9493 return DAG.getNode(ISD::SRA, dl, Op.getValueType(), 9494 Op.getOperand(1), Op.getOperand(2)); 9495 case Intrinsic::x86_sse2_pcmpeq_b: 9496 case Intrinsic::x86_sse2_pcmpeq_w: 9497 case Intrinsic::x86_sse2_pcmpeq_d: 9498 case Intrinsic::x86_sse41_pcmpeqq: 9499 case Intrinsic::x86_avx2_pcmpeq_b: 9500 case Intrinsic::x86_avx2_pcmpeq_w: 9501 case Intrinsic::x86_avx2_pcmpeq_d: 9502 case Intrinsic::x86_avx2_pcmpeq_q: 9503 return 
DAG.getNode(X86ISD::PCMPEQ, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_sse2_pcmpgt_b:
  case Intrinsic::x86_sse2_pcmpgt_w:
  case Intrinsic::x86_sse2_pcmpgt_d:
  case Intrinsic::x86_sse42_pcmpgtq:
  case Intrinsic::x86_avx2_pcmpgt_b:
  case Intrinsic::x86_avx2_pcmpgt_w:
  case Intrinsic::x86_avx2_pcmpgt_d:
  case Intrinsic::x86_avx2_pcmpgt_q:
    return DAG.getNode(X86ISD::PCMPGT, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
    return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_psign_b_128:
  case Intrinsic::x86_ssse3_psign_w_128:
  case Intrinsic::x86_ssse3_psign_d_128:
  case Intrinsic::x86_avx2_psign_b:
  case Intrinsic::x86_avx2_psign_w:
  case Intrinsic::x86_avx2_psign_d:
    return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_sse41_insertps:
    return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  // ptest and testp intrinsics. The intrinsics these come from are designed
  // to return an integer value rather than just set flags, so lower them to
  // the ptest or testp pattern plus a setcc to materialize the result.
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC = 0;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked =
true; // Fallthrough 9587 case Intrinsic::x86_sse41_ptestnzc: 9588 case Intrinsic::x86_avx_ptestnzc_256: 9589 // ZF and CF = 0 9590 X86CC = X86::COND_A; 9591 break; 9592 } 9593 9594 SDValue LHS = Op.getOperand(1); 9595 SDValue RHS = Op.getOperand(2); 9596 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 9597 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 9598 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 9599 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 9600 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9601 } 9602 9603 // SSE/AVX shift intrinsics 9604 case Intrinsic::x86_sse2_psll_w: 9605 case Intrinsic::x86_sse2_psll_d: 9606 case Intrinsic::x86_sse2_psll_q: 9607 case Intrinsic::x86_avx2_psll_w: 9608 case Intrinsic::x86_avx2_psll_d: 9609 case Intrinsic::x86_avx2_psll_q: 9610 return DAG.getNode(X86ISD::VSHL, dl, Op.getValueType(), 9611 Op.getOperand(1), Op.getOperand(2)); 9612 case Intrinsic::x86_sse2_psrl_w: 9613 case Intrinsic::x86_sse2_psrl_d: 9614 case Intrinsic::x86_sse2_psrl_q: 9615 case Intrinsic::x86_avx2_psrl_w: 9616 case Intrinsic::x86_avx2_psrl_d: 9617 case Intrinsic::x86_avx2_psrl_q: 9618 return DAG.getNode(X86ISD::VSRL, dl, Op.getValueType(), 9619 Op.getOperand(1), Op.getOperand(2)); 9620 case Intrinsic::x86_sse2_psra_w: 9621 case Intrinsic::x86_sse2_psra_d: 9622 case Intrinsic::x86_avx2_psra_w: 9623 case Intrinsic::x86_avx2_psra_d: 9624 return DAG.getNode(X86ISD::VSRA, dl, Op.getValueType(), 9625 Op.getOperand(1), Op.getOperand(2)); 9626 case Intrinsic::x86_sse2_pslli_w: 9627 case Intrinsic::x86_sse2_pslli_d: 9628 case Intrinsic::x86_sse2_pslli_q: 9629 case Intrinsic::x86_avx2_pslli_w: 9630 case Intrinsic::x86_avx2_pslli_d: 9631 case Intrinsic::x86_avx2_pslli_q: 9632 return getTargetVShiftNode(X86ISD::VSHLI, dl, Op.getValueType(), 9633 Op.getOperand(1), Op.getOperand(2), DAG); 9634 case Intrinsic::x86_sse2_psrli_w: 9635 case Intrinsic::x86_sse2_psrli_d: 9636 case Intrinsic::x86_sse2_psrli_q: 9637 case Intrinsic::x86_avx2_psrli_w: 9638 case Intrinsic::x86_avx2_psrli_d: 9639 case Intrinsic::x86_avx2_psrli_q: 9640 return getTargetVShiftNode(X86ISD::VSRLI, dl, Op.getValueType(), 9641 Op.getOperand(1), Op.getOperand(2), DAG); 9642 case Intrinsic::x86_sse2_psrai_w: 9643 case Intrinsic::x86_sse2_psrai_d: 9644 case Intrinsic::x86_avx2_psrai_w: 9645 case Intrinsic::x86_avx2_psrai_d: 9646 return getTargetVShiftNode(X86ISD::VSRAI, dl, Op.getValueType(), 9647 Op.getOperand(1), Op.getOperand(2), DAG); 9648 // Fix vector shift instructions where the last operand is a non-immediate 9649 // i32 value. 
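  // For example, _mm_slli_pi16(v, n) with a run-time 'n' still reaches us as
  // the x86_mmx_pslli_w intrinsic; it is rewritten below to the register form
  // (x86_mmx_psll_w), with the 32-bit amount placed in a 64-bit operand whose
  // upper half is zeroed.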
9650 case Intrinsic::x86_mmx_pslli_w: 9651 case Intrinsic::x86_mmx_pslli_d: 9652 case Intrinsic::x86_mmx_pslli_q: 9653 case Intrinsic::x86_mmx_psrli_w: 9654 case Intrinsic::x86_mmx_psrli_d: 9655 case Intrinsic::x86_mmx_psrli_q: 9656 case Intrinsic::x86_mmx_psrai_w: 9657 case Intrinsic::x86_mmx_psrai_d: { 9658 SDValue ShAmt = Op.getOperand(2); 9659 if (isa<ConstantSDNode>(ShAmt)) 9660 return SDValue(); 9661 9662 unsigned NewIntNo = 0; 9663 switch (IntNo) { 9664 case Intrinsic::x86_mmx_pslli_w: 9665 NewIntNo = Intrinsic::x86_mmx_psll_w; 9666 break; 9667 case Intrinsic::x86_mmx_pslli_d: 9668 NewIntNo = Intrinsic::x86_mmx_psll_d; 9669 break; 9670 case Intrinsic::x86_mmx_pslli_q: 9671 NewIntNo = Intrinsic::x86_mmx_psll_q; 9672 break; 9673 case Intrinsic::x86_mmx_psrli_w: 9674 NewIntNo = Intrinsic::x86_mmx_psrl_w; 9675 break; 9676 case Intrinsic::x86_mmx_psrli_d: 9677 NewIntNo = Intrinsic::x86_mmx_psrl_d; 9678 break; 9679 case Intrinsic::x86_mmx_psrli_q: 9680 NewIntNo = Intrinsic::x86_mmx_psrl_q; 9681 break; 9682 case Intrinsic::x86_mmx_psrai_w: 9683 NewIntNo = Intrinsic::x86_mmx_psra_w; 9684 break; 9685 case Intrinsic::x86_mmx_psrai_d: 9686 NewIntNo = Intrinsic::x86_mmx_psra_d; 9687 break; 9688 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 9689 } 9690 9691 // The vector shift intrinsics with scalars uses 32b shift amounts but 9692 // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits 9693 // to be zero. 9694 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, ShAmt, 9695 DAG.getConstant(0, MVT::i32)); 9696// FIXME this must be lowered to get rid of the invalid type. 9697 9698 EVT VT = Op.getValueType(); 9699 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); 9700 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9701 DAG.getConstant(NewIntNo, MVT::i32), 9702 Op.getOperand(1), ShAmt); 9703 } 9704 } 9705} 9706 9707SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 9708 SelectionDAG &DAG) const { 9709 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9710 MFI->setReturnAddressIsTaken(true); 9711 9712 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9713 DebugLoc dl = Op.getDebugLoc(); 9714 9715 if (Depth > 0) { 9716 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 9717 SDValue Offset = 9718 DAG.getConstant(TD->getPointerSize(), 9719 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 9720 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9721 DAG.getNode(ISD::ADD, dl, getPointerTy(), 9722 FrameAddr, Offset), 9723 MachinePointerInfo(), false, false, false, 0); 9724 } 9725 9726 // Just load the return address. 9727 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 9728 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9729 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 9730} 9731 9732SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 9733 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9734 MFI->setFrameAddressIsTaken(true); 9735 9736 EVT VT = Op.getValueType(); 9737 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 9738 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9739 unsigned FrameReg = Subtarget->is64Bit() ? 
X86::RBP : X86::EBP; 9740 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 9741 while (Depth--) 9742 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 9743 MachinePointerInfo(), 9744 false, false, false, 0); 9745 return FrameAddr; 9746} 9747 9748SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 9749 SelectionDAG &DAG) const { 9750 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 9751} 9752 9753SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 9754 MachineFunction &MF = DAG.getMachineFunction(); 9755 SDValue Chain = Op.getOperand(0); 9756 SDValue Offset = Op.getOperand(1); 9757 SDValue Handler = Op.getOperand(2); 9758 DebugLoc dl = Op.getDebugLoc(); 9759 9760 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 9761 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 9762 getPointerTy()); 9763 unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX); 9764 9765 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 9766 DAG.getIntPtrConstant(TD->getPointerSize())); 9767 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 9768 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 9769 false, false, 0); 9770 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 9771 MF.getRegInfo().addLiveOut(StoreAddrReg); 9772 9773 return DAG.getNode(X86ISD::EH_RETURN, dl, 9774 MVT::Other, 9775 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 9776} 9777 9778SDValue X86TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 9779 SelectionDAG &DAG) const { 9780 return Op.getOperand(0); 9781} 9782 9783SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 9784 SelectionDAG &DAG) const { 9785 SDValue Root = Op.getOperand(0); 9786 SDValue Trmp = Op.getOperand(1); // trampoline 9787 SDValue FPtr = Op.getOperand(2); // nested function 9788 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 9789 DebugLoc dl = Op.getDebugLoc(); 9790 9791 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9792 9793 if (Subtarget->is64Bit()) { 9794 SDValue OutChains[6]; 9795 9796 // Large code-model. 9797 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 9798 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 9799 9800 const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10); 9801 const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11); 9802 9803 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 9804 9805 // Load the pointer to the nested function into R11. 9806 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 9807 SDValue Addr = Trmp; 9808 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9809 Addr, MachinePointerInfo(TrmpAddr), 9810 false, false, 0); 9811 9812 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9813 DAG.getConstant(2, MVT::i64)); 9814 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 9815 MachinePointerInfo(TrmpAddr, 2), 9816 false, false, 2); 9817 9818 // Load the 'nest' parameter value into R10. 
9819 // R10 is specified in X86CallingConv.td 9820 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 9821 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9822 DAG.getConstant(10, MVT::i64)); 9823 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9824 Addr, MachinePointerInfo(TrmpAddr, 10), 9825 false, false, 0); 9826 9827 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9828 DAG.getConstant(12, MVT::i64)); 9829 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 9830 MachinePointerInfo(TrmpAddr, 12), 9831 false, false, 2); 9832 9833 // Jump to the nested function. 9834 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 9835 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9836 DAG.getConstant(20, MVT::i64)); 9837 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9838 Addr, MachinePointerInfo(TrmpAddr, 20), 9839 false, false, 0); 9840 9841 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 9842 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9843 DAG.getConstant(22, MVT::i64)); 9844 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 9845 MachinePointerInfo(TrmpAddr, 22), 9846 false, false, 0); 9847 9848 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 9849 } else { 9850 const Function *Func = 9851 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 9852 CallingConv::ID CC = Func->getCallingConv(); 9853 unsigned NestReg; 9854 9855 switch (CC) { 9856 default: 9857 llvm_unreachable("Unsupported calling convention"); 9858 case CallingConv::C: 9859 case CallingConv::X86_StdCall: { 9860 // Pass 'nest' parameter in ECX. 9861 // Must be kept in sync with X86CallingConv.td 9862 NestReg = X86::ECX; 9863 9864 // Check that ECX wasn't needed by an 'inreg' parameter. 9865 FunctionType *FTy = Func->getFunctionType(); 9866 const AttrListPtr &Attrs = Func->getAttributes(); 9867 9868 if (!Attrs.isEmpty() && !Func->isVarArg()) { 9869 unsigned InRegCount = 0; 9870 unsigned Idx = 1; 9871 9872 for (FunctionType::param_iterator I = FTy->param_begin(), 9873 E = FTy->param_end(); I != E; ++I, ++Idx) 9874 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 9875 // FIXME: should only count parameters that are lowered to integers. 9876 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 9877 9878 if (InRegCount > 2) { 9879 report_fatal_error("Nest register in use - reduce number of inreg" 9880 " parameters!"); 9881 } 9882 } 9883 break; 9884 } 9885 case CallingConv::X86_FastCall: 9886 case CallingConv::X86_ThisCall: 9887 case CallingConv::Fast: 9888 // Pass 'nest' parameter in EAX. 9889 // Must be kept in sync with X86CallingConv.td 9890 NestReg = X86::EAX; 9891 break; 9892 } 9893 9894 SDValue OutChains[4]; 9895 SDValue Addr, Disp; 9896 9897 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9898 DAG.getConstant(10, MVT::i32)); 9899 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 9900 9901 // This is storing the opcode for MOV32ri. 9902 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 9903 const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg); 9904 OutChains[0] = DAG.getStore(Root, dl, 9905 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 9906 Trmp, MachinePointerInfo(TrmpAddr), 9907 false, false, 0); 9908 9909 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9910 DAG.getConstant(1, MVT::i32)); 9911 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 9912 MachinePointerInfo(TrmpAddr, 1), 9913 false, false, 1); 9914 9915 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
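    // The 10-byte trampoline written here is laid out as:
    //   [0]    0xB8+reg  mov imm32 into the nest register
    //   [1-4]  imm32     the 'nest' parameter value
    //   [5]    0xE9      jmp rel32
    //   [6-9]  rel32     FPtr - (Trmp + 10), i.e. relative to the first
    //                    byte after the trampoline.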
9916 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9917 DAG.getConstant(5, MVT::i32)); 9918 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 9919 MachinePointerInfo(TrmpAddr, 5), 9920 false, false, 1); 9921 9922 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9923 DAG.getConstant(6, MVT::i32)); 9924 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 9925 MachinePointerInfo(TrmpAddr, 6), 9926 false, false, 1); 9927 9928 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 9929 } 9930} 9931 9932SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 9933 SelectionDAG &DAG) const { 9934 /* 9935 The rounding mode is in bits 11:10 of FPSR, and has the following 9936 settings: 9937 00 Round to nearest 9938 01 Round to -inf 9939 10 Round to +inf 9940 11 Round to 0 9941 9942 FLT_ROUNDS, on the other hand, expects the following: 9943 -1 Undefined 9944 0 Round to 0 9945 1 Round to nearest 9946 2 Round to +inf 9947 3 Round to -inf 9948 9949 To perform the conversion, we do: 9950 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 9951 */ 9952 9953 MachineFunction &MF = DAG.getMachineFunction(); 9954 const TargetMachine &TM = MF.getTarget(); 9955 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 9956 unsigned StackAlignment = TFI.getStackAlignment(); 9957 EVT VT = Op.getValueType(); 9958 DebugLoc DL = Op.getDebugLoc(); 9959 9960 // Save FP Control Word to stack slot 9961 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 9962 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 9963 9964 9965 MachineMemOperand *MMO = 9966 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 9967 MachineMemOperand::MOStore, 2, 2); 9968 9969 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 9970 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 9971 DAG.getVTList(MVT::Other), 9972 Ops, 2, MVT::i16, MMO); 9973 9974 // Load FP Control Word from stack slot 9975 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 9976 MachinePointerInfo(), false, false, false, 0); 9977 9978 // Transform as necessary 9979 SDValue CWD1 = 9980 DAG.getNode(ISD::SRL, DL, MVT::i16, 9981 DAG.getNode(ISD::AND, DL, MVT::i16, 9982 CWD, DAG.getConstant(0x800, MVT::i16)), 9983 DAG.getConstant(11, MVT::i8)); 9984 SDValue CWD2 = 9985 DAG.getNode(ISD::SRL, DL, MVT::i16, 9986 DAG.getNode(ISD::AND, DL, MVT::i16, 9987 CWD, DAG.getConstant(0x400, MVT::i16)), 9988 DAG.getConstant(9, MVT::i8)); 9989 9990 SDValue RetVal = 9991 DAG.getNode(ISD::AND, DL, MVT::i16, 9992 DAG.getNode(ISD::ADD, DL, MVT::i16, 9993 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 9994 DAG.getConstant(1, MVT::i16)), 9995 DAG.getConstant(3, MVT::i16)); 9996 9997 9998 return DAG.getNode((VT.getSizeInBits() < 16 ? 9999 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 10000} 10001 10002SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 10003 EVT VT = Op.getValueType(); 10004 EVT OpVT = VT; 10005 unsigned NumBits = VT.getSizeInBits(); 10006 DebugLoc dl = Op.getDebugLoc(); 10007 10008 Op = Op.getOperand(0); 10009 if (VT == MVT::i8) { 10010 // Zero extend to i32 since there is not an i8 bsr. 10011 OpVT = MVT::i32; 10012 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10013 } 10014 10015 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 10016 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10017 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10018 10019 // If src is zero (i.e. bsr sets ZF), returns NumBits. 
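  // The CMOV below substitutes 2*NumBits-1 when the source was zero, so the
  // final XOR with NumBits-1 produces NumBits, the value CTLZ defines for a
  // zero input. For a non-zero i32 input such as 0x10, bsr yields 4 and
  // 4 ^ 31 = 27, the correct leading-zero count.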
10020 SDValue Ops[] = { 10021 Op, 10022 DAG.getConstant(NumBits+NumBits-1, OpVT), 10023 DAG.getConstant(X86::COND_E, MVT::i8), 10024 Op.getValue(1) 10025 }; 10026 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 10027 10028 // Finally xor with NumBits-1. 10029 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10030 10031 if (VT == MVT::i8) 10032 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10033 return Op; 10034} 10035 10036SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op, 10037 SelectionDAG &DAG) const { 10038 EVT VT = Op.getValueType(); 10039 EVT OpVT = VT; 10040 unsigned NumBits = VT.getSizeInBits(); 10041 DebugLoc dl = Op.getDebugLoc(); 10042 10043 Op = Op.getOperand(0); 10044 if (VT == MVT::i8) { 10045 // Zero extend to i32 since there is not an i8 bsr. 10046 OpVT = MVT::i32; 10047 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10048 } 10049 10050 // Issue a bsr (scan bits in reverse). 10051 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10052 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10053 10054 // And xor with NumBits-1. 10055 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10056 10057 if (VT == MVT::i8) 10058 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10059 return Op; 10060} 10061 10062SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 10063 EVT VT = Op.getValueType(); 10064 unsigned NumBits = VT.getSizeInBits(); 10065 DebugLoc dl = Op.getDebugLoc(); 10066 Op = Op.getOperand(0); 10067 10068 // Issue a bsf (scan bits forward) which also sets EFLAGS. 10069 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10070 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 10071 10072 // If src is zero (i.e. bsf sets ZF), returns NumBits. 10073 SDValue Ops[] = { 10074 Op, 10075 DAG.getConstant(NumBits, VT), 10076 DAG.getConstant(X86::COND_E, MVT::i8), 10077 Op.getValue(1) 10078 }; 10079 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 10080} 10081 10082// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 10083// ones, and then concatenate the result back. 
10084static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 10085 EVT VT = Op.getValueType(); 10086 10087 assert(VT.getSizeInBits() == 256 && VT.isInteger() && 10088 "Unsupported value type for operation"); 10089 10090 int NumElems = VT.getVectorNumElements(); 10091 DebugLoc dl = Op.getDebugLoc(); 10092 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 10093 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 10094 10095 // Extract the LHS vectors 10096 SDValue LHS = Op.getOperand(0); 10097 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 10098 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 10099 10100 // Extract the RHS vectors 10101 SDValue RHS = Op.getOperand(1); 10102 SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); 10103 SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); 10104 10105 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10106 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10107 10108 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 10109 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 10110 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 10111} 10112 10113SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { 10114 assert(Op.getValueType().getSizeInBits() == 256 && 10115 Op.getValueType().isInteger() && 10116 "Only handle AVX 256-bit vector integer operation"); 10117 return Lower256IntArith(Op, DAG); 10118} 10119 10120SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const { 10121 assert(Op.getValueType().getSizeInBits() == 256 && 10122 Op.getValueType().isInteger() && 10123 "Only handle AVX 256-bit vector integer operation"); 10124 return Lower256IntArith(Op, DAG); 10125} 10126 10127SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10128 EVT VT = Op.getValueType(); 10129 10130 // Decompose 256-bit ops into smaller 128-bit ops. 
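  // Without AVX2 there is no 256-bit integer multiply, so e.g. a v8i32
  // multiply is split by Lower256IntArith into two v4i32 multiplies on the
  // extracted 128-bit halves and the results are concatenated back together.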
10131 if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()) 10132 return Lower256IntArith(Op, DAG); 10133 10134 DebugLoc dl = Op.getDebugLoc(); 10135 10136 SDValue A = Op.getOperand(0); 10137 SDValue B = Op.getOperand(1); 10138 10139 if (VT == MVT::v4i64) { 10140 assert(Subtarget->hasAVX2() && "Lowering v4i64 multiply requires AVX2"); 10141 10142 // ulong2 Ahi = __builtin_ia32_psrlqi256( a, 32); 10143 // ulong2 Bhi = __builtin_ia32_psrlqi256( b, 32); 10144 // ulong2 AloBlo = __builtin_ia32_pmuludq256( a, b ); 10145 // ulong2 AloBhi = __builtin_ia32_pmuludq256( a, Bhi ); 10146 // ulong2 AhiBlo = __builtin_ia32_pmuludq256( Ahi, b ); 10147 // 10148 // AloBhi = __builtin_ia32_psllqi256( AloBhi, 32 ); 10149 // AhiBlo = __builtin_ia32_psllqi256( AhiBlo, 32 ); 10150 // return AloBlo + AloBhi + AhiBlo; 10151 10152 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, 10153 DAG.getConstant(32, MVT::i32)); 10154 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, 10155 DAG.getConstant(32, MVT::i32)); 10156 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10157 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 10158 A, B); 10159 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10160 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 10161 A, Bhi); 10162 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10163 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 10164 Ahi, B); 10165 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, 10166 DAG.getConstant(32, MVT::i32)); 10167 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, 10168 DAG.getConstant(32, MVT::i32)); 10169 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 10170 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 10171 return Res; 10172 } 10173 10174 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply"); 10175 10176 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32); 10177 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32); 10178 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b ); 10179 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi ); 10180 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b ); 10181 // 10182 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 ); 10183 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 ); 10184 // return AloBlo + AloBhi + AhiBlo; 10185 10186 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, 10187 DAG.getConstant(32, MVT::i32)); 10188 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, 10189 DAG.getConstant(32, MVT::i32)); 10190 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10191 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10192 A, B); 10193 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10194 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10195 A, Bhi); 10196 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10197 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10198 Ahi, B); 10199 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, 10200 DAG.getConstant(32, MVT::i32)); 10201 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, 10202 DAG.getConstant(32, MVT::i32)); 10203 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 10204 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 10205 return Res; 10206} 10207 10208SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 10209 10210 EVT VT = Op.getValueType(); 10211 DebugLoc dl = Op.getDebugLoc(); 10212 SDValue R = Op.getOperand(0); 10213 SDValue Amt = Op.getOperand(1); 10214 
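  // The cases below handle, in order: splat-constant shift amounts via the
  // VSHLI/VSRLI/VSRAI nodes (with i8-element shifts emulated by a wider
  // shift plus masking or a sign-bit trick), a variable v4i32 SHL that
  // materializes 2^amt through the float exponent field, a variable v16i8
  // SHL built from repeated psllw-by-immediate and VSELECT steps, and
  // finally 256-bit shifts split into two 128-bit halves.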
LLVMContext *Context = DAG.getContext(); 10215 10216 if (!Subtarget->hasSSE2()) 10217 return SDValue(); 10218 10219 // Optimize shl/srl/sra with constant shift amount. 10220 if (isSplatVector(Amt.getNode())) { 10221 SDValue SclrAmt = Amt->getOperand(0); 10222 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 10223 uint64_t ShiftAmt = C->getZExtValue(); 10224 10225 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 10226 (Subtarget->hasAVX2() && 10227 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 10228 if (Op.getOpcode() == ISD::SHL) 10229 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 10230 DAG.getConstant(ShiftAmt, MVT::i32)); 10231 if (Op.getOpcode() == ISD::SRL) 10232 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 10233 DAG.getConstant(ShiftAmt, MVT::i32)); 10234 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 10235 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 10236 DAG.getConstant(ShiftAmt, MVT::i32)); 10237 } 10238 10239 if (VT == MVT::v16i8) { 10240 if (Op.getOpcode() == ISD::SHL) { 10241 // Make a large shift. 10242 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 10243 DAG.getConstant(ShiftAmt, MVT::i32)); 10244 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10245 // Zero out the rightmost bits. 10246 SmallVector<SDValue, 16> V(16, 10247 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10248 MVT::i8)); 10249 return DAG.getNode(ISD::AND, dl, VT, SHL, 10250 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10251 } 10252 if (Op.getOpcode() == ISD::SRL) { 10253 // Make a large shift. 10254 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 10255 DAG.getConstant(ShiftAmt, MVT::i32)); 10256 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10257 // Zero out the leftmost bits. 10258 SmallVector<SDValue, 16> V(16, 10259 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10260 MVT::i8)); 10261 return DAG.getNode(ISD::AND, dl, VT, SRL, 10262 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10263 } 10264 if (Op.getOpcode() == ISD::SRA) { 10265 if (ShiftAmt == 7) { 10266 // R s>> 7 === R s< 0 10267 SDValue Zeros = getZeroVector(VT, /* HasSSE2 */true, 10268 /* HasAVX2 */false, DAG, dl); 10269 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10270 } 10271 10272 // R s>> a === ((R u>> a) ^ m) - m 10273 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10274 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 10275 MVT::i8)); 10276 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 10277 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10278 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10279 return Res; 10280 } 10281 } 10282 10283 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 10284 if (Op.getOpcode() == ISD::SHL) { 10285 // Make a large shift. 10286 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 10287 DAG.getConstant(ShiftAmt, MVT::i32)); 10288 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10289 // Zero out the rightmost bits. 10290 SmallVector<SDValue, 32> V(32, 10291 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10292 MVT::i8)); 10293 return DAG.getNode(ISD::AND, dl, VT, SHL, 10294 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10295 } 10296 if (Op.getOpcode() == ISD::SRL) { 10297 // Make a large shift. 10298 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 10299 DAG.getConstant(ShiftAmt, MVT::i32)); 10300 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10301 // Zero out the leftmost bits. 
10302 SmallVector<SDValue, 32> V(32, 10303 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10304 MVT::i8)); 10305 return DAG.getNode(ISD::AND, dl, VT, SRL, 10306 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10307 } 10308 if (Op.getOpcode() == ISD::SRA) { 10309 if (ShiftAmt == 7) { 10310 // R s>> 7 === R s< 0 10311 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, 10312 true /* HasAVX2 */, DAG, dl); 10313 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10314 } 10315 10316 // R s>> a === ((R u>> a) ^ m) - m 10317 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10318 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 10319 MVT::i8)); 10320 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 10321 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10322 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10323 return Res; 10324 } 10325 } 10326 } 10327 } 10328 10329 // Lower SHL with variable shift amount. 10330 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 10331 Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1), 10332 DAG.getConstant(23, MVT::i32)); 10333 10334 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); 10335 Constant *C = ConstantVector::getSplat(4, CI); 10336 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 10337 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 10338 MachinePointerInfo::getConstantPool(), 10339 false, false, false, 16); 10340 10341 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 10342 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 10343 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 10344 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 10345 } 10346 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 10347 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 10348 10349 // a = a << 5; 10350 Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1), 10351 DAG.getConstant(5, MVT::i32)); 10352 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 10353 10354 // Turn 'a' into a mask suitable for VSELECT 10355 SDValue VSelM = DAG.getConstant(0x80, VT); 10356 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10357 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10358 10359 SDValue CM1 = DAG.getConstant(0x0f, VT); 10360 SDValue CM2 = DAG.getConstant(0x3f, VT); 10361 10362 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 10363 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 10364 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 10365 DAG.getConstant(4, MVT::i32), DAG); 10366 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 10367 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10368 10369 // a += a 10370 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 10371 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10372 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10373 10374 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 10375 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 10376 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 10377 DAG.getConstant(2, MVT::i32), DAG); 10378 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 10379 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10380 10381 // a += a 10382 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 10383 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10384 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 10385 10386 // return VSELECT(r, r+r, a); 10387 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 10388 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 10389 return R; 10390 } 

  // Decompose 256-bit shifts into smaller 128-bit shifts.
  if (VT.getSizeInBits() == 256) {
    unsigned NumElems = VT.getVectorNumElements();
    MVT EltVT = VT.getVectorElementType().getSimpleVT();
    EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Extract the two vectors
    SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl);
    SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32),
                                     DAG, dl);

    // Recreate the shift amount vectors
    SDValue Amt1, Amt2;
    if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
      // Constant shift amount
      SmallVector<SDValue, 4> Amt1Csts;
      SmallVector<SDValue, 4> Amt2Csts;
      for (unsigned i = 0; i != NumElems/2; ++i)
        Amt1Csts.push_back(Amt->getOperand(i));
      for (unsigned i = NumElems/2; i != NumElems; ++i)
        Amt2Csts.push_back(Amt->getOperand(i));

      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
                         &Amt1Csts[0], NumElems/2);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
                         &Amt2Csts[0], NumElems/2);
    } else {
      // Variable shift amount
      Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl);
      Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32),
                                 DAG, dl);
    }

    // Issue new vector shifts for the smaller types
    V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
    V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);

    // Concatenate the result back
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}

SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
  // Lower the "add/sub/mul with overflow" instruction into a regular
  // instruction plus a "setcc" instruction that checks the overflow flag.
  // The "brcond" lowering looks for this combo and may remove the "setcc"
  // instruction if the "setcc" has only one use.
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  unsigned Cond = 0;
  DebugLoc DL = Op.getDebugLoc();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::INC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
10468 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 10469 if (C->isOne()) { 10470 BaseOp = X86ISD::DEC; 10471 Cond = X86::COND_O; 10472 break; 10473 } 10474 BaseOp = X86ISD::SUB; 10475 Cond = X86::COND_O; 10476 break; 10477 case ISD::USUBO: 10478 BaseOp = X86ISD::SUB; 10479 Cond = X86::COND_B; 10480 break; 10481 case ISD::SMULO: 10482 BaseOp = X86ISD::SMUL; 10483 Cond = X86::COND_O; 10484 break; 10485 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 10486 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 10487 MVT::i32); 10488 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 10489 10490 SDValue SetCC = 10491 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 10492 DAG.getConstant(X86::COND_O, MVT::i32), 10493 SDValue(Sum.getNode(), 2)); 10494 10495 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 10496 } 10497 } 10498 10499 // Also sets EFLAGS. 10500 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 10501 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 10502 10503 SDValue SetCC = 10504 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 10505 DAG.getConstant(Cond, MVT::i32), 10506 SDValue(Sum.getNode(), 1)); 10507 10508 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 10509} 10510 10511SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 10512 SelectionDAG &DAG) const { 10513 DebugLoc dl = Op.getDebugLoc(); 10514 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 10515 EVT VT = Op.getValueType(); 10516 10517 if (!Subtarget->hasSSE2() || !VT.isVector()) 10518 return SDValue(); 10519 10520 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 10521 ExtraVT.getScalarType().getSizeInBits(); 10522 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 10523 10524 switch (VT.getSimpleVT().SimpleTy) { 10525 default: return SDValue(); 10526 case MVT::v8i32: 10527 case MVT::v16i16: 10528 if (!Subtarget->hasAVX()) 10529 return SDValue(); 10530 if (!Subtarget->hasAVX2()) { 10531 // needs to be split 10532 int NumElems = VT.getVectorNumElements(); 10533 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 10534 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 10535 10536 // Extract the LHS vectors 10537 SDValue LHS = Op.getOperand(0); 10538 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 10539 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 10540 10541 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10542 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10543 10544 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 10545 int ExtraNumElems = ExtraVT.getVectorNumElements(); 10546 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 10547 ExtraNumElems/2); 10548 SDValue Extra = DAG.getValueType(ExtraVT); 10549 10550 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 10551 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 10552 10553 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);; 10554 } 10555 // fall through 10556 case MVT::v4i32: 10557 case MVT::v8i16: { 10558 SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, 10559 Op.getOperand(0), ShAmt, DAG); 10560 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 10561 } 10562 } 10563} 10564 10565 10566SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ 10567 DebugLoc dl = Op.getDebugLoc(); 10568 10569 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 
10570 // There isn't any reason to disable it if the target processor supports it. 10571 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 10572 SDValue Chain = Op.getOperand(0); 10573 SDValue Zero = DAG.getConstant(0, MVT::i32); 10574 SDValue Ops[] = { 10575 DAG.getRegister(X86::ESP, MVT::i32), // Base 10576 DAG.getTargetConstant(1, MVT::i8), // Scale 10577 DAG.getRegister(0, MVT::i32), // Index 10578 DAG.getTargetConstant(0, MVT::i32), // Disp 10579 DAG.getRegister(0, MVT::i32), // Segment. 10580 Zero, 10581 Chain 10582 }; 10583 SDNode *Res = 10584 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10585 array_lengthof(Ops)); 10586 return SDValue(Res, 0); 10587 } 10588 10589 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 10590 if (!isDev) 10591 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10592 10593 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10594 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 10595 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 10596 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 10597 10598 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 10599 if (!Op1 && !Op2 && !Op3 && Op4) 10600 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 10601 10602 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 10603 if (Op1 && !Op2 && !Op3 && !Op4) 10604 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 10605 10606 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 10607 // (MFENCE)>; 10608 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10609} 10610 10611SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op, 10612 SelectionDAG &DAG) const { 10613 DebugLoc dl = Op.getDebugLoc(); 10614 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 10615 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 10616 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 10617 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 10618 10619 // The only fence that needs an instruction is a sequentially-consistent 10620 // cross-thread fence. 10621 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 10622 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 10623 // no-sse2). There isn't any reason to disable it if the target processor 10624 // supports it. 10625 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 10626 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10627 10628 SDValue Chain = Op.getOperand(0); 10629 SDValue Zero = DAG.getConstant(0, MVT::i32); 10630 SDValue Ops[] = { 10631 DAG.getRegister(X86::ESP, MVT::i32), // Base 10632 DAG.getTargetConstant(1, MVT::i8), // Scale 10633 DAG.getRegister(0, MVT::i32), // Index 10634 DAG.getTargetConstant(0, MVT::i32), // Disp 10635 DAG.getRegister(0, MVT::i32), // Segment. 10636 Zero, 10637 Chain 10638 }; 10639 SDNode *Res = 10640 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10641 array_lengthof(Ops)); 10642 return SDValue(Res, 0); 10643 } 10644 10645 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
10646 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10647} 10648 10649 10650SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 10651 EVT T = Op.getValueType(); 10652 DebugLoc DL = Op.getDebugLoc(); 10653 unsigned Reg = 0; 10654 unsigned size = 0; 10655 switch(T.getSimpleVT().SimpleTy) { 10656 default: 10657 assert(false && "Invalid value type!"); 10658 case MVT::i8: Reg = X86::AL; size = 1; break; 10659 case MVT::i16: Reg = X86::AX; size = 2; break; 10660 case MVT::i32: Reg = X86::EAX; size = 4; break; 10661 case MVT::i64: 10662 assert(Subtarget->is64Bit() && "Node not type legal!"); 10663 Reg = X86::RAX; size = 8; 10664 break; 10665 } 10666 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 10667 Op.getOperand(2), SDValue()); 10668 SDValue Ops[] = { cpIn.getValue(0), 10669 Op.getOperand(1), 10670 Op.getOperand(3), 10671 DAG.getTargetConstant(size, MVT::i8), 10672 cpIn.getValue(1) }; 10673 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10674 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 10675 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 10676 Ops, 5, T, MMO); 10677 SDValue cpOut = 10678 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 10679 return cpOut; 10680} 10681 10682SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 10683 SelectionDAG &DAG) const { 10684 assert(Subtarget->is64Bit() && "Result not type legalized?"); 10685 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10686 SDValue TheChain = Op.getOperand(0); 10687 DebugLoc dl = Op.getDebugLoc(); 10688 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 10689 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 10690 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 10691 rax.getValue(2)); 10692 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 10693 DAG.getConstant(32, MVT::i8)); 10694 SDValue Ops[] = { 10695 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 10696 rdx.getValue(1) 10697 }; 10698 return DAG.getMergeValues(Ops, 2, dl); 10699} 10700 10701SDValue X86TargetLowering::LowerBITCAST(SDValue Op, 10702 SelectionDAG &DAG) const { 10703 EVT SrcVT = Op.getOperand(0).getValueType(); 10704 EVT DstVT = Op.getValueType(); 10705 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 10706 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 10707 assert((DstVT == MVT::i64 || 10708 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 10709 "Unexpected custom BITCAST"); 10710 // i64 <=> MMX conversions are Legal. 10711 if (SrcVT==MVT::i64 && DstVT.isVector()) 10712 return Op; 10713 if (DstVT==MVT::i64 && SrcVT.isVector()) 10714 return Op; 10715 // MMX <=> MMX conversions are Legal. 10716 if (SrcVT.isVector() && DstVT.isVector()) 10717 return Op; 10718 // All other conversions need to be expanded. 
10719 return SDValue(); 10720} 10721 10722SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 10723 SDNode *Node = Op.getNode(); 10724 DebugLoc dl = Node->getDebugLoc(); 10725 EVT T = Node->getValueType(0); 10726 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 10727 DAG.getConstant(0, T), Node->getOperand(2)); 10728 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 10729 cast<AtomicSDNode>(Node)->getMemoryVT(), 10730 Node->getOperand(0), 10731 Node->getOperand(1), negOp, 10732 cast<AtomicSDNode>(Node)->getSrcValue(), 10733 cast<AtomicSDNode>(Node)->getAlignment(), 10734 cast<AtomicSDNode>(Node)->getOrdering(), 10735 cast<AtomicSDNode>(Node)->getSynchScope()); 10736} 10737 10738static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 10739 SDNode *Node = Op.getNode(); 10740 DebugLoc dl = Node->getDebugLoc(); 10741 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10742 10743 // Convert seq_cst store -> xchg 10744 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 10745 // FIXME: On 32-bit, store -> fist or movq would be more efficient 10746 // (The only way to get a 16-byte store is cmpxchg16b) 10747 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 10748 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 10749 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 10750 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 10751 cast<AtomicSDNode>(Node)->getMemoryVT(), 10752 Node->getOperand(0), 10753 Node->getOperand(1), Node->getOperand(2), 10754 cast<AtomicSDNode>(Node)->getMemOperand(), 10755 cast<AtomicSDNode>(Node)->getOrdering(), 10756 cast<AtomicSDNode>(Node)->getSynchScope()); 10757 return Swap.getValue(1); 10758 } 10759 // Other atomic stores have a simple pattern. 10760 return Op; 10761} 10762 10763static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 10764 EVT VT = Op.getNode()->getValueType(0); 10765 10766 // Let legalize expand this if it isn't a legal type yet. 10767 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10768 return SDValue(); 10769 10770 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10771 10772 unsigned Opc; 10773 bool ExtraOp = false; 10774 switch (Op.getOpcode()) { 10775 default: assert(0 && "Invalid code"); 10776 case ISD::ADDC: Opc = X86ISD::ADD; break; 10777 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 10778 case ISD::SUBC: Opc = X86ISD::SUB; break; 10779 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 10780 } 10781 10782 if (!ExtraOp) 10783 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10784 Op.getOperand(1)); 10785 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10786 Op.getOperand(1), Op.getOperand(2)); 10787} 10788 10789/// LowerOperation - Provide custom lowering hooks for some operations. 
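/// Each opcode that was flagged as Custom when this TargetLowering was
/// constructed is dispatched from the switch below to its Lower* helper.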
10790/// 10791SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10792 switch (Op.getOpcode()) { 10793 default: llvm_unreachable("Should not custom lower this!"); 10794 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 10795 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 10796 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG); 10797 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 10798 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 10799 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 10800 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10801 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 10802 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10803 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10804 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10805 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 10806 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 10807 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10808 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10809 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10810 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10811 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 10812 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10813 case ISD::SHL_PARTS: 10814 case ISD::SRA_PARTS: 10815 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 10816 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 10817 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 10818 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 10819 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 10820 case ISD::FABS: return LowerFABS(Op, DAG); 10821 case ISD::FNEG: return LowerFNEG(Op, DAG); 10822 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 10823 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 10824 case ISD::SETCC: return LowerSETCC(Op, DAG); 10825 case ISD::SELECT: return LowerSELECT(Op, DAG); 10826 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 10827 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 10828 case ISD::VASTART: return LowerVASTART(Op, DAG); 10829 case ISD::VAARG: return LowerVAARG(Op, DAG); 10830 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 10831 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10832 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10833 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10834 case ISD::FRAME_TO_ARGS_OFFSET: 10835 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 10836 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 10837 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 10838 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 10839 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 10840 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10841 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 10842 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 10843 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 10844 case ISD::MUL: return LowerMUL(Op, DAG); 10845 case ISD::SRA: 10846 case ISD::SRL: 10847 case ISD::SHL: return LowerShift(Op, DAG); 10848 case ISD::SADDO: 10849 case ISD::UADDO: 10850 case ISD::SSUBO: 10851 case ISD::USUBO: 10852 case ISD::SMULO: 10853 case ISD::UMULO: return LowerXALUO(Op, DAG); 10854 
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 10855 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10856 case ISD::ADDC: 10857 case ISD::ADDE: 10858 case ISD::SUBC: 10859 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 10860 case ISD::ADD: return LowerADD(Op, DAG); 10861 case ISD::SUB: return LowerSUB(Op, DAG); 10862 } 10863} 10864 10865static void ReplaceATOMIC_LOAD(SDNode *Node, 10866 SmallVectorImpl<SDValue> &Results, 10867 SelectionDAG &DAG) { 10868 DebugLoc dl = Node->getDebugLoc(); 10869 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10870 10871 // Convert wide load -> cmpxchg8b/cmpxchg16b 10872 // FIXME: On 32-bit, load -> fild or movq would be more efficient 10873 // (The only way to get a 16-byte load is cmpxchg16b) 10874 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 10875 SDValue Zero = DAG.getConstant(0, VT); 10876 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 10877 Node->getOperand(0), 10878 Node->getOperand(1), Zero, Zero, 10879 cast<AtomicSDNode>(Node)->getMemOperand(), 10880 cast<AtomicSDNode>(Node)->getOrdering(), 10881 cast<AtomicSDNode>(Node)->getSynchScope()); 10882 Results.push_back(Swap.getValue(0)); 10883 Results.push_back(Swap.getValue(1)); 10884} 10885 10886void X86TargetLowering:: 10887ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 10888 SelectionDAG &DAG, unsigned NewOp) const { 10889 DebugLoc dl = Node->getDebugLoc(); 10890 assert (Node->getValueType(0) == MVT::i64 && 10891 "Only know how to expand i64 atomics"); 10892 10893 SDValue Chain = Node->getOperand(0); 10894 SDValue In1 = Node->getOperand(1); 10895 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10896 Node->getOperand(2), DAG.getIntPtrConstant(0)); 10897 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10898 Node->getOperand(2), DAG.getIntPtrConstant(1)); 10899 SDValue Ops[] = { Chain, In1, In2L, In2H }; 10900 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10901 SDValue Result = 10902 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 10903 cast<MemSDNode>(Node)->getMemOperand()); 10904 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 10905 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 10906 Results.push_back(Result.getValue(2)); 10907} 10908 10909/// ReplaceNodeResults - Replace a node with an illegal result type 10910/// with a new node built out of custom code. 10911void X86TargetLowering::ReplaceNodeResults(SDNode *N, 10912 SmallVectorImpl<SDValue>&Results, 10913 SelectionDAG &DAG) const { 10914 DebugLoc dl = N->getDebugLoc(); 10915 switch (N->getOpcode()) { 10916 default: 10917 assert(false && "Do not know how to custom type legalize this operation!"); 10918 return; 10919 case ISD::SIGN_EXTEND_INREG: 10920 case ISD::ADDC: 10921 case ISD::ADDE: 10922 case ISD::SUBC: 10923 case ISD::SUBE: 10924 // We don't want to expand or promote these. 10925 return; 10926 case ISD::FP_TO_SINT: { 10927 std::pair<SDValue,SDValue> Vals = 10928 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 10929 SDValue FIST = Vals.first, StackSlot = Vals.second; 10930 if (FIST.getNode() != 0) { 10931 EVT VT = N->getValueType(0); 10932 // Return a load from the stack slot. 
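      // FP_TO_INTHelper has already stored the converted value to StackSlot
      // (via an FIST-style instruction), so the integer result is produced
      // by simply reloading it from that slot.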
10933 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 10934 MachinePointerInfo(), 10935 false, false, false, 0)); 10936 } 10937 return; 10938 } 10939 case ISD::READCYCLECOUNTER: { 10940 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10941 SDValue TheChain = N->getOperand(0); 10942 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 10943 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 10944 rd.getValue(1)); 10945 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 10946 eax.getValue(2)); 10947 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 10948 SDValue Ops[] = { eax, edx }; 10949 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 10950 Results.push_back(edx.getValue(1)); 10951 return; 10952 } 10953 case ISD::ATOMIC_CMP_SWAP: { 10954 EVT T = N->getValueType(0); 10955 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 10956 bool Regs64bit = T == MVT::i128; 10957 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 10958 SDValue cpInL, cpInH; 10959 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 10960 DAG.getConstant(0, HalfT)); 10961 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 10962 DAG.getConstant(1, HalfT)); 10963 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 10964 Regs64bit ? X86::RAX : X86::EAX, 10965 cpInL, SDValue()); 10966 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 10967 Regs64bit ? X86::RDX : X86::EDX, 10968 cpInH, cpInL.getValue(1)); 10969 SDValue swapInL, swapInH; 10970 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 10971 DAG.getConstant(0, HalfT)); 10972 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 10973 DAG.getConstant(1, HalfT)); 10974 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 10975 Regs64bit ? X86::RBX : X86::EBX, 10976 swapInL, cpInH.getValue(1)); 10977 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 10978 Regs64bit ? X86::RCX : X86::ECX, 10979 swapInH, swapInL.getValue(1)); 10980 SDValue Ops[] = { swapInH.getValue(0), 10981 N->getOperand(1), 10982 swapInH.getValue(1) }; 10983 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10984 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 10985 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG : 10986 X86ISD::LCMPXCHG8_DAG; 10987 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 10988 Ops, 3, T, MMO); 10989 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 10990 Regs64bit ? X86::RAX : X86::EAX, 10991 HalfT, Result.getValue(1)); 10992 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 10993 Regs64bit ? 
X86::RDX : X86::EDX, 10994 HalfT, cpOutL.getValue(2)); 10995 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 10996 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 10997 Results.push_back(cpOutH.getValue(1)); 10998 return; 10999 } 11000 case ISD::ATOMIC_LOAD_ADD: 11001 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG); 11002 return; 11003 case ISD::ATOMIC_LOAD_AND: 11004 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG); 11005 return; 11006 case ISD::ATOMIC_LOAD_NAND: 11007 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG); 11008 return; 11009 case ISD::ATOMIC_LOAD_OR: 11010 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG); 11011 return; 11012 case ISD::ATOMIC_LOAD_SUB: 11013 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG); 11014 return; 11015 case ISD::ATOMIC_LOAD_XOR: 11016 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG); 11017 return; 11018 case ISD::ATOMIC_SWAP: 11019 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG); 11020 return; 11021 case ISD::ATOMIC_LOAD: 11022 ReplaceATOMIC_LOAD(N, Results, DAG); 11023 } 11024} 11025 11026const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 11027 switch (Opcode) { 11028 default: return NULL; 11029 case X86ISD::BSF: return "X86ISD::BSF"; 11030 case X86ISD::BSR: return "X86ISD::BSR"; 11031 case X86ISD::SHLD: return "X86ISD::SHLD"; 11032 case X86ISD::SHRD: return "X86ISD::SHRD"; 11033 case X86ISD::FAND: return "X86ISD::FAND"; 11034 case X86ISD::FOR: return "X86ISD::FOR"; 11035 case X86ISD::FXOR: return "X86ISD::FXOR"; 11036 case X86ISD::FSRL: return "X86ISD::FSRL"; 11037 case X86ISD::FILD: return "X86ISD::FILD"; 11038 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 11039 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 11040 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 11041 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 11042 case X86ISD::FLD: return "X86ISD::FLD"; 11043 case X86ISD::FST: return "X86ISD::FST"; 11044 case X86ISD::CALL: return "X86ISD::CALL"; 11045 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 11046 case X86ISD::BT: return "X86ISD::BT"; 11047 case X86ISD::CMP: return "X86ISD::CMP"; 11048 case X86ISD::COMI: return "X86ISD::COMI"; 11049 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 11050 case X86ISD::SETCC: return "X86ISD::SETCC"; 11051 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 11052 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 11053 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 11054 case X86ISD::CMOV: return "X86ISD::CMOV"; 11055 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 11056 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 11057 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 11058 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 11059 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 11060 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 11061 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 11062 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 11063 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 11064 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 11065 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 11066 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 11067 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 11068 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 11069 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 11070 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 11071 case 
X86ISD::HADD: return "X86ISD::HADD"; 11072 case X86ISD::HSUB: return "X86ISD::HSUB"; 11073 case X86ISD::FHADD: return "X86ISD::FHADD"; 11074 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 11075 case X86ISD::FMAX: return "X86ISD::FMAX"; 11076 case X86ISD::FMIN: return "X86ISD::FMIN"; 11077 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 11078 case X86ISD::FRCP: return "X86ISD::FRCP"; 11079 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 11080 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 11081 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 11082 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 11083 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 11084 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 11085 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 11086 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 11087 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 11088 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 11089 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 11090 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 11091 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 11092 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 11093 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 11094 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 11095 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 11096 case X86ISD::VSHL: return "X86ISD::VSHL"; 11097 case X86ISD::VSRL: return "X86ISD::VSRL"; 11098 case X86ISD::VSRA: return "X86ISD::VSRA"; 11099 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 11100 case X86ISD::VSRLI: return "X86ISD::VSRLI"; 11101 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 11102 case X86ISD::CMPP: return "X86ISD::CMPP"; 11103 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 11104 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 11105 case X86ISD::ADD: return "X86ISD::ADD"; 11106 case X86ISD::SUB: return "X86ISD::SUB"; 11107 case X86ISD::ADC: return "X86ISD::ADC"; 11108 case X86ISD::SBB: return "X86ISD::SBB"; 11109 case X86ISD::SMUL: return "X86ISD::SMUL"; 11110 case X86ISD::UMUL: return "X86ISD::UMUL"; 11111 case X86ISD::INC: return "X86ISD::INC"; 11112 case X86ISD::DEC: return "X86ISD::DEC"; 11113 case X86ISD::OR: return "X86ISD::OR"; 11114 case X86ISD::XOR: return "X86ISD::XOR"; 11115 case X86ISD::AND: return "X86ISD::AND"; 11116 case X86ISD::ANDN: return "X86ISD::ANDN"; 11117 case X86ISD::BLSI: return "X86ISD::BLSI"; 11118 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 11119 case X86ISD::BLSR: return "X86ISD::BLSR"; 11120 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 11121 case X86ISD::PTEST: return "X86ISD::PTEST"; 11122 case X86ISD::TESTP: return "X86ISD::TESTP"; 11123 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 11124 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 11125 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 11126 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 11127 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 11128 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 11129 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 11130 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 11131 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 11132 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 11133 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 11134 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 11135 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 11136 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 11137 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 11138 case X86ISD::UNPCKL: return 
"X86ISD::UNPCKL"; 11139 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 11140 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 11141 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 11142 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 11143 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 11144 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 11145 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 11146 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 11147 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 11148 } 11149} 11150 11151// isLegalAddressingMode - Return true if the addressing mode represented 11152// by AM is legal for this target, for a load/store of the specified type. 11153bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 11154 Type *Ty) const { 11155 // X86 supports extremely general addressing modes. 11156 CodeModel::Model M = getTargetMachine().getCodeModel(); 11157 Reloc::Model R = getTargetMachine().getRelocationModel(); 11158 11159 // X86 allows a sign-extended 32-bit immediate field as a displacement. 11160 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 11161 return false; 11162 11163 if (AM.BaseGV) { 11164 unsigned GVFlags = 11165 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 11166 11167 // If a reference to this global requires an extra load, we can't fold it. 11168 if (isGlobalStubReference(GVFlags)) 11169 return false; 11170 11171 // If BaseGV requires a register for the PIC base, we cannot also have a 11172 // BaseReg specified. 11173 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 11174 return false; 11175 11176 // If lower 4G is not available, then we must use rip-relative addressing. 11177 if ((M != CodeModel::Small || R != Reloc::Static) && 11178 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 11179 return false; 11180 } 11181 11182 switch (AM.Scale) { 11183 case 0: 11184 case 1: 11185 case 2: 11186 case 4: 11187 case 8: 11188 // These scales always work. 11189 break; 11190 case 3: 11191 case 5: 11192 case 9: 11193 // These scales are formed with basereg+scalereg. Only accept if there is 11194 // no basereg yet. 11195 if (AM.HasBaseReg) 11196 return false; 11197 break; 11198 default: // Other stuff never works. 11199 return false; 11200 } 11201 11202 return true; 11203} 11204 11205 11206bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11207 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11208 return false; 11209 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11210 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11211 if (NumBits1 <= NumBits2) 11212 return false; 11213 return true; 11214} 11215 11216bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11217 if (!VT1.isInteger() || !VT2.isInteger()) 11218 return false; 11219 unsigned NumBits1 = VT1.getSizeInBits(); 11220 unsigned NumBits2 = VT2.getSizeInBits(); 11221 if (NumBits1 <= NumBits2) 11222 return false; 11223 return true; 11224} 11225 11226bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 11227 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 11228 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 11229} 11230 11231bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 11232 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 
11233 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 11234} 11235 11236bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 11237 // i16 instructions are longer (0x66 prefix) and potentially slower. 11238 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 11239} 11240 11241/// isShuffleMaskLegal - Targets can use this to indicate that they only 11242/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 11243/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 11244/// are assumed to be legal. 11245bool 11246X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 11247 EVT VT) const { 11248 // Very little shuffling can be done for 64-bit vectors right now. 11249 if (VT.getSizeInBits() == 64) 11250 return false; 11251 11252 // FIXME: pshufb, blends, shifts. 11253 return (VT.getVectorNumElements() == 2 || 11254 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 11255 isMOVLMask(M, VT) || 11256 isSHUFPMask(M, VT, Subtarget->hasAVX()) || 11257 isPSHUFDMask(M, VT) || 11258 isPSHUFHWMask(M, VT) || 11259 isPSHUFLWMask(M, VT) || 11260 isPALIGNRMask(M, VT, Subtarget) || 11261 isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || 11262 isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || 11263 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) || 11264 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2())); 11265} 11266 11267bool 11268X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 11269 EVT VT) const { 11270 unsigned NumElts = VT.getVectorNumElements(); 11271 // FIXME: This collection of masks seems suspect. 11272 if (NumElts == 2) 11273 return true; 11274 if (NumElts == 4 && VT.getSizeInBits() == 128) { 11275 return (isMOVLMask(Mask, VT) || 11276 isCommutedMOVLMask(Mask, VT, true) || 11277 isSHUFPMask(Mask, VT, Subtarget->hasAVX()) || 11278 isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true)); 11279 } 11280 return false; 11281} 11282 11283//===----------------------------------------------------------------------===// 11284// X86 Scheduler Hooks 11285//===----------------------------------------------------------------------===// 11286 11287// private utility function 11288MachineBasicBlock * 11289X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 11290 MachineBasicBlock *MBB, 11291 unsigned regOpc, 11292 unsigned immOpc, 11293 unsigned LoadOpc, 11294 unsigned CXchgOpc, 11295 unsigned notOpc, 11296 unsigned EAXreg, 11297 TargetRegisterClass *RC, 11298 bool invSrc) const { 11299 // For the atomic bitwise operator, we generate 11300 // thisMBB: 11301 // newMBB: 11302 // ld t1 = [bitinstr.addr] 11303 // op t2 = t1, [bitinstr.val] 11304 // mov EAX = t1 11305 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 11306 // bz newMBB 11307 // fallthrough -->nextMBB 11308 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11309 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11310 MachineFunction::iterator MBBIter = MBB; 11311 ++MBBIter; 11312 11313 /// First build the CFG 11314 MachineFunction *F = MBB->getParent(); 11315 MachineBasicBlock *thisMBB = MBB; 11316 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11317 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11318 F->insert(MBBIter, newMBB); 11319 F->insert(MBBIter, nextMBB); 11320 11321 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 
11322 nextMBB->splice(nextMBB->begin(), thisMBB, 11323 llvm::next(MachineBasicBlock::iterator(bInstr)), 11324 thisMBB->end()); 11325 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11326 11327 // Update thisMBB to fall through to newMBB 11328 thisMBB->addSuccessor(newMBB); 11329 11330 // newMBB jumps to itself and fall through to nextMBB 11331 newMBB->addSuccessor(nextMBB); 11332 newMBB->addSuccessor(newMBB); 11333 11334 // Insert instructions into newMBB based on incoming instruction 11335 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 && 11336 "unexpected number of operands"); 11337 DebugLoc dl = bInstr->getDebugLoc(); 11338 MachineOperand& destOper = bInstr->getOperand(0); 11339 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11340 int numArgs = bInstr->getNumOperands() - 1; 11341 for (int i=0; i < numArgs; ++i) 11342 argOpers[i] = &bInstr->getOperand(i+1); 11343 11344 // x86 address has 4 operands: base, index, scale, and displacement 11345 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11346 int valArgIndx = lastAddrIndx + 1; 11347 11348 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 11349 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1); 11350 for (int i=0; i <= lastAddrIndx; ++i) 11351 (*MIB).addOperand(*argOpers[i]); 11352 11353 unsigned tt = F->getRegInfo().createVirtualRegister(RC); 11354 if (invSrc) { 11355 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1); 11356 } 11357 else 11358 tt = t1; 11359 11360 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 11361 assert((argOpers[valArgIndx]->isReg() || 11362 argOpers[valArgIndx]->isImm()) && 11363 "invalid operand"); 11364 if (argOpers[valArgIndx]->isReg()) 11365 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); 11366 else 11367 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); 11368 MIB.addReg(tt); 11369 (*MIB).addOperand(*argOpers[valArgIndx]); 11370 11371 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg); 11372 MIB.addReg(t1); 11373 11374 MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc)); 11375 for (int i=0; i <= lastAddrIndx; ++i) 11376 (*MIB).addOperand(*argOpers[i]); 11377 MIB.addReg(t2); 11378 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11379 (*MIB).setMemRefs(bInstr->memoperands_begin(), 11380 bInstr->memoperands_end()); 11381 11382 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 11383 MIB.addReg(EAXreg); 11384 11385 // insert branch 11386 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11387 11388 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 11389 return nextMBB; 11390} 11391 11392// private utility function: 64 bit atomics on 32 bit host. 
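// CMPXCHG8B is the only 64-bit atomic read-modify-write primitive available
// on a 32-bit host, so each 64-bit atomic operation below is expanded into a
// compare-and-exchange loop over the EDX:EAX / ECX:EBX register pairs.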
11393MachineBasicBlock * 11394X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, 11395 MachineBasicBlock *MBB, 11396 unsigned regOpcL, 11397 unsigned regOpcH, 11398 unsigned immOpcL, 11399 unsigned immOpcH, 11400 bool invSrc) const { 11401 // For the atomic bitwise operator, we generate 11402 // thisMBB (instructions are in pairs, except cmpxchg8b) 11403 // ld t1,t2 = [bitinstr.addr] 11404 // newMBB: 11405 // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4) 11406 // op t5, t6 <- out1, out2, [bitinstr.val] 11407 // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val]) 11408 // mov ECX, EBX <- t5, t6 11409 // mov EAX, EDX <- t1, t2 11410 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit] 11411 // mov t3, t4 <- EAX, EDX 11412 // bz newMBB 11413 // result in out1, out2 11414 // fallthrough -->nextMBB 11415 11416 const TargetRegisterClass *RC = X86::GR32RegisterClass; 11417 const unsigned LoadOpc = X86::MOV32rm; 11418 const unsigned NotOpc = X86::NOT32r; 11419 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11420 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11421 MachineFunction::iterator MBBIter = MBB; 11422 ++MBBIter; 11423 11424 /// First build the CFG 11425 MachineFunction *F = MBB->getParent(); 11426 MachineBasicBlock *thisMBB = MBB; 11427 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11428 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11429 F->insert(MBBIter, newMBB); 11430 F->insert(MBBIter, nextMBB); 11431 11432 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 11433 nextMBB->splice(nextMBB->begin(), thisMBB, 11434 llvm::next(MachineBasicBlock::iterator(bInstr)), 11435 thisMBB->end()); 11436 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11437 11438 // Update thisMBB to fall through to newMBB 11439 thisMBB->addSuccessor(newMBB); 11440 11441 // newMBB jumps to itself and fall through to nextMBB 11442 newMBB->addSuccessor(nextMBB); 11443 newMBB->addSuccessor(newMBB); 11444 11445 DebugLoc dl = bInstr->getDebugLoc(); 11446 // Insert instructions into newMBB based on incoming instruction 11447 // There are 8 "real" operands plus 9 implicit def/uses, ignored here. 11448 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 && 11449 "unexpected number of operands"); 11450 MachineOperand& dest1Oper = bInstr->getOperand(0); 11451 MachineOperand& dest2Oper = bInstr->getOperand(1); 11452 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11453 for (int i=0; i < 2 + X86::AddrNumOperands; ++i) { 11454 argOpers[i] = &bInstr->getOperand(i+2); 11455 11456 // We use some of the operands multiple times, so conservatively just 11457 // clear any kill flags that might be present. 11458 if (argOpers[i]->isReg() && argOpers[i]->isUse()) 11459 argOpers[i]->setIsKill(false); 11460 } 11461 11462 // x86 address has 5 operands: base, index, scale, displacement, and segment. 11463 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11464 11465 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 11466 MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1); 11467 for (int i=0; i <= lastAddrIndx; ++i) 11468 (*MIB).addOperand(*argOpers[i]); 11469 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 11470 MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2); 11471 // add 4 to displacement. 
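  // The second load reads the high 32 bits of the value from [addr + 4]; the
  // address operands are reused below with the displacement (or the global's
  // offset) bumped by 4.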
11472 for (int i=0; i <= lastAddrIndx-2; ++i) 11473 (*MIB).addOperand(*argOpers[i]); 11474 MachineOperand newOp3 = *(argOpers[3]); 11475 if (newOp3.isImm()) 11476 newOp3.setImm(newOp3.getImm()+4); 11477 else 11478 newOp3.setOffset(newOp3.getOffset()+4); 11479 (*MIB).addOperand(newOp3); 11480 (*MIB).addOperand(*argOpers[lastAddrIndx]); 11481 11482 // t3/4 are defined later, at the bottom of the loop 11483 unsigned t3 = F->getRegInfo().createVirtualRegister(RC); 11484 unsigned t4 = F->getRegInfo().createVirtualRegister(RC); 11485 BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg()) 11486 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB); 11487 BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg()) 11488 .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB); 11489 11490 // The subsequent operations should be using the destination registers of 11491 //the PHI instructions. 11492 if (invSrc) { 11493 t1 = F->getRegInfo().createVirtualRegister(RC); 11494 t2 = F->getRegInfo().createVirtualRegister(RC); 11495 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg()); 11496 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg()); 11497 } else { 11498 t1 = dest1Oper.getReg(); 11499 t2 = dest2Oper.getReg(); 11500 } 11501 11502 int valArgIndx = lastAddrIndx + 1; 11503 assert((argOpers[valArgIndx]->isReg() || 11504 argOpers[valArgIndx]->isImm()) && 11505 "invalid operand"); 11506 unsigned t5 = F->getRegInfo().createVirtualRegister(RC); 11507 unsigned t6 = F->getRegInfo().createVirtualRegister(RC); 11508 if (argOpers[valArgIndx]->isReg()) 11509 MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5); 11510 else 11511 MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5); 11512 if (regOpcL != X86::MOV32rr) 11513 MIB.addReg(t1); 11514 (*MIB).addOperand(*argOpers[valArgIndx]); 11515 assert(argOpers[valArgIndx + 1]->isReg() == 11516 argOpers[valArgIndx]->isReg()); 11517 assert(argOpers[valArgIndx + 1]->isImm() == 11518 argOpers[valArgIndx]->isImm()); 11519 if (argOpers[valArgIndx + 1]->isReg()) 11520 MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6); 11521 else 11522 MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6); 11523 if (regOpcH != X86::MOV32rr) 11524 MIB.addReg(t2); 11525 (*MIB).addOperand(*argOpers[valArgIndx + 1]); 11526 11527 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 11528 MIB.addReg(t1); 11529 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX); 11530 MIB.addReg(t2); 11531 11532 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX); 11533 MIB.addReg(t5); 11534 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX); 11535 MIB.addReg(t6); 11536 11537 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B)); 11538 for (int i=0; i <= lastAddrIndx; ++i) 11539 (*MIB).addOperand(*argOpers[i]); 11540 11541 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11542 (*MIB).setMemRefs(bInstr->memoperands_begin(), 11543 bInstr->memoperands_end()); 11544 11545 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3); 11546 MIB.addReg(X86::EAX); 11547 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4); 11548 MIB.addReg(X86::EDX); 11549 11550 // insert branch 11551 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11552 11553 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 
11554 return nextMBB; 11555} 11556 11557// private utility function 11558MachineBasicBlock * 11559X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, 11560 MachineBasicBlock *MBB, 11561 unsigned cmovOpc) const { 11562 // For the atomic min/max operator, we generate 11563 // thisMBB: 11564 // newMBB: 11565 // ld t1 = [min/max.addr] 11566 // mov t2 = [min/max.val] 11567 // cmp t1, t2 11568 // cmov[cond] t2 = t1 11569 // mov EAX = t1 11570 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 11571 // bz newMBB 11572 // fallthrough -->nextMBB 11573 // 11574 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11575 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11576 MachineFunction::iterator MBBIter = MBB; 11577 ++MBBIter; 11578 11579 /// First build the CFG 11580 MachineFunction *F = MBB->getParent(); 11581 MachineBasicBlock *thisMBB = MBB; 11582 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11583 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11584 F->insert(MBBIter, newMBB); 11585 F->insert(MBBIter, nextMBB); 11586 11587 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 11588 nextMBB->splice(nextMBB->begin(), thisMBB, 11589 llvm::next(MachineBasicBlock::iterator(mInstr)), 11590 thisMBB->end()); 11591 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11592 11593 // Update thisMBB to fall through to newMBB 11594 thisMBB->addSuccessor(newMBB); 11595 11596 // newMBB jumps to newMBB and fall through to nextMBB 11597 newMBB->addSuccessor(nextMBB); 11598 newMBB->addSuccessor(newMBB); 11599 11600 DebugLoc dl = mInstr->getDebugLoc(); 11601 // Insert instructions into newMBB based on incoming instruction 11602 assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 && 11603 "unexpected number of operands"); 11604 MachineOperand& destOper = mInstr->getOperand(0); 11605 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11606 int numArgs = mInstr->getNumOperands() - 1; 11607 for (int i=0; i < numArgs; ++i) 11608 argOpers[i] = &mInstr->getOperand(i+1); 11609 11610 // x86 address has 4 operands: base, index, scale, and displacement 11611 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11612 int valArgIndx = lastAddrIndx + 1; 11613 11614 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11615 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1); 11616 for (int i=0; i <= lastAddrIndx; ++i) 11617 (*MIB).addOperand(*argOpers[i]); 11618 11619 // We only support register and immediate values 11620 assert((argOpers[valArgIndx]->isReg() || 11621 argOpers[valArgIndx]->isImm()) && 11622 "invalid operand"); 11623 11624 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11625 if (argOpers[valArgIndx]->isReg()) 11626 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2); 11627 else 11628 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2); 11629 (*MIB).addOperand(*argOpers[valArgIndx]); 11630 11631 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 11632 MIB.addReg(t1); 11633 11634 MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr)); 11635 MIB.addReg(t1); 11636 MIB.addReg(t2); 11637 11638 // Generate movc 11639 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11640 MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3); 11641 MIB.addReg(t2); 11642 MIB.addReg(t1); 11643 11644 // Cmp and exchange if none has modified the memory location 11645 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32)); 
11646 for (int i=0; i <= lastAddrIndx; ++i) 11647 (*MIB).addOperand(*argOpers[i]); 11648 MIB.addReg(t3); 11649 assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11650 (*MIB).setMemRefs(mInstr->memoperands_begin(), 11651 mInstr->memoperands_end()); 11652 11653 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 11654 MIB.addReg(X86::EAX); 11655 11656 // insert branch 11657 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11658 11659 mInstr->eraseFromParent(); // The pseudo instruction is gone now. 11660 return nextMBB; 11661} 11662 11663// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 11664// or XMM0_V32I8 in AVX all of this code can be replaced with that 11665// in the .td file. 11666MachineBasicBlock * 11667X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 11668 unsigned numArgs, bool memArg) const { 11669 assert(Subtarget->hasSSE42() && 11670 "Target must have SSE4.2 or AVX features enabled"); 11671 11672 DebugLoc dl = MI->getDebugLoc(); 11673 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11674 unsigned Opc; 11675 if (!Subtarget->hasAVX()) { 11676 if (memArg) 11677 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 11678 else 11679 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 11680 } else { 11681 if (memArg) 11682 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 11683 else 11684 Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 11685 } 11686 11687 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 11688 for (unsigned i = 0; i < numArgs; ++i) { 11689 MachineOperand &Op = MI->getOperand(i+1); 11690 if (!(Op.isReg() && Op.isImplicit())) 11691 MIB.addOperand(Op); 11692 } 11693 BuildMI(*BB, MI, dl, 11694 TII->get(Subtarget->hasAVX() ? X86::VMOVAPSrr : X86::MOVAPSrr), 11695 MI->getOperand(0).getReg()) 11696 .addReg(X86::XMM0); 11697 11698 MI->eraseFromParent(); 11699 return BB; 11700} 11701 11702MachineBasicBlock * 11703X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 11704 DebugLoc dl = MI->getDebugLoc(); 11705 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11706 11707 // Address into RAX/EAX, other two args into ECX, EDX. 11708 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 11709 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 11710 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 11711 for (int i = 0; i < X86::AddrNumOperands; ++i) 11712 MIB.addOperand(MI->getOperand(i)); 11713 11714 unsigned ValOps = X86::AddrNumOperands; 11715 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 11716 .addReg(MI->getOperand(ValOps).getReg()); 11717 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 11718 .addReg(MI->getOperand(ValOps+1).getReg()); 11719 11720 // The instruction doesn't actually take any operands though. 11721 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 11722 11723 MI->eraseFromParent(); // The pseudo is gone now. 11724 return BB; 11725} 11726 11727MachineBasicBlock * 11728X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const { 11729 DebugLoc dl = MI->getDebugLoc(); 11730 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11731 11732 // First arg in ECX, the second in EAX. 
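  // MWAIT reads its two arguments implicitly from ECX and EAX, so the pseudo
  // instruction's register operands are copied into those physical registers
  // before the real MWAITrr is emitted with no explicit operands.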
11733 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 11734 .addReg(MI->getOperand(0).getReg()); 11735 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX) 11736 .addReg(MI->getOperand(1).getReg()); 11737 11738 // The instruction doesn't actually take any operands though. 11739 BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr)); 11740 11741 MI->eraseFromParent(); // The pseudo is gone now. 11742 return BB; 11743} 11744 11745MachineBasicBlock * 11746X86TargetLowering::EmitVAARG64WithCustomInserter( 11747 MachineInstr *MI, 11748 MachineBasicBlock *MBB) const { 11749 // Emit va_arg instruction on X86-64. 11750 11751 // Operands to this pseudo-instruction: 11752 // 0 ) Output : destination address (reg) 11753 // 1-5) Input : va_list address (addr, i64mem) 11754 // 6 ) ArgSize : Size (in bytes) of vararg type 11755 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 11756 // 8 ) Align : Alignment of type 11757 // 9 ) EFLAGS (implicit-def) 11758 11759 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 11760 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 11761 11762 unsigned DestReg = MI->getOperand(0).getReg(); 11763 MachineOperand &Base = MI->getOperand(1); 11764 MachineOperand &Scale = MI->getOperand(2); 11765 MachineOperand &Index = MI->getOperand(3); 11766 MachineOperand &Disp = MI->getOperand(4); 11767 MachineOperand &Segment = MI->getOperand(5); 11768 unsigned ArgSize = MI->getOperand(6).getImm(); 11769 unsigned ArgMode = MI->getOperand(7).getImm(); 11770 unsigned Align = MI->getOperand(8).getImm(); 11771 11772 // Memory Reference 11773 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 11774 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 11775 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 11776 11777 // Machine Information 11778 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11779 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 11780 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 11781 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 11782 DebugLoc DL = MI->getDebugLoc(); 11783 11784 // struct va_list { 11785 // i32 gp_offset 11786 // i32 fp_offset 11787 // i64 overflow_area (address) 11788 // i64 reg_save_area (address) 11789 // } 11790 // sizeof(va_list) = 24 11791 // alignment(va_list) = 8 11792 11793 unsigned TotalNumIntRegs = 6; 11794 unsigned TotalNumXMMRegs = 8; 11795 bool UseGPOffset = (ArgMode == 1); 11796 bool UseFPOffset = (ArgMode == 2); 11797 unsigned MaxOffset = TotalNumIntRegs * 8 + 11798 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 11799 11800 /* Align ArgSize to a multiple of 8 */ 11801 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 11802 bool NeedsAlign = (Align > 8); 11803 11804 MachineBasicBlock *thisMBB = MBB; 11805 MachineBasicBlock *overflowMBB; 11806 MachineBasicBlock *offsetMBB; 11807 MachineBasicBlock *endMBB; 11808 11809 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 11810 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 11811 unsigned OffsetReg = 0; 11812 11813 if (!UseGPOffset && !UseFPOffset) { 11814 // If we only pull from the overflow region, we don't create a branch. 11815 // We don't need to alter control flow. 
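    // (ArgMode 0 means this argument class is always passed in memory, so
    // only overflow_area ever needs to be consulted.)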
11816 OffsetDestReg = 0; // unused 11817 OverflowDestReg = DestReg; 11818 11819 offsetMBB = NULL; 11820 overflowMBB = thisMBB; 11821 endMBB = thisMBB; 11822 } else { 11823 // First emit code to check if gp_offset (or fp_offset) is below the bound. 11824 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 11825 // If not, pull from overflow_area. (branch to overflowMBB) 11826 // 11827 // thisMBB 11828 // | . 11829 // | . 11830 // offsetMBB overflowMBB 11831 // | . 11832 // | . 11833 // endMBB 11834 11835 // Registers for the PHI in endMBB 11836 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 11837 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 11838 11839 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11840 MachineFunction *MF = MBB->getParent(); 11841 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11842 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11843 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11844 11845 MachineFunction::iterator MBBIter = MBB; 11846 ++MBBIter; 11847 11848 // Insert the new basic blocks 11849 MF->insert(MBBIter, offsetMBB); 11850 MF->insert(MBBIter, overflowMBB); 11851 MF->insert(MBBIter, endMBB); 11852 11853 // Transfer the remainder of MBB and its successor edges to endMBB. 11854 endMBB->splice(endMBB->begin(), thisMBB, 11855 llvm::next(MachineBasicBlock::iterator(MI)), 11856 thisMBB->end()); 11857 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11858 11859 // Make offsetMBB and overflowMBB successors of thisMBB 11860 thisMBB->addSuccessor(offsetMBB); 11861 thisMBB->addSuccessor(overflowMBB); 11862 11863 // endMBB is a successor of both offsetMBB and overflowMBB 11864 offsetMBB->addSuccessor(endMBB); 11865 overflowMBB->addSuccessor(endMBB); 11866 11867 // Load the offset value into a register 11868 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 11869 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 11870 .addOperand(Base) 11871 .addOperand(Scale) 11872 .addOperand(Index) 11873 .addDisp(Disp, UseFPOffset ? 4 : 0) 11874 .addOperand(Segment) 11875 .setMemRefs(MMOBegin, MMOEnd); 11876 11877 // Check if there is enough room left to pull this argument. 11878 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 11879 .addReg(OffsetReg) 11880 .addImm(MaxOffset + 8 - ArgSizeA8); 11881 11882 // Branch to "overflowMBB" if offset >= max 11883 // Fall through to "offsetMBB" otherwise 11884 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 11885 .addMBB(overflowMBB); 11886 } 11887 11888 // In offsetMBB, emit code to use the reg_save_area. 11889 if (offsetMBB) { 11890 assert(OffsetReg != 0); 11891 11892 // Read the reg_save_area address. 11893 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 11894 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 11895 .addOperand(Base) 11896 .addOperand(Scale) 11897 .addOperand(Index) 11898 .addDisp(Disp, 16) 11899 .addOperand(Segment) 11900 .setMemRefs(MMOBegin, MMOEnd); 11901 11902 // Zero-extend the offset 11903 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 11904 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 11905 .addImm(0) 11906 .addReg(OffsetReg) 11907 .addImm(X86::sub_32bit); 11908 11909 // Add the offset to the reg_save_area to get the final address. 
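    // i.e. the argument is read from reg_save_area + gp_offset (or
    // fp_offset), per the usual AMD64 va_arg scheme.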
11910 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 11911 .addReg(OffsetReg64) 11912 .addReg(RegSaveReg); 11913 11914 // Compute the offset for the next argument 11915 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 11916 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 11917 .addReg(OffsetReg) 11918 .addImm(UseFPOffset ? 16 : 8); 11919 11920 // Store it back into the va_list. 11921 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 11922 .addOperand(Base) 11923 .addOperand(Scale) 11924 .addOperand(Index) 11925 .addDisp(Disp, UseFPOffset ? 4 : 0) 11926 .addOperand(Segment) 11927 .addReg(NextOffsetReg) 11928 .setMemRefs(MMOBegin, MMOEnd); 11929 11930 // Jump to endMBB 11931 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 11932 .addMBB(endMBB); 11933 } 11934 11935 // 11936 // Emit code to use overflow area 11937 // 11938 11939 // Load the overflow_area address into a register. 11940 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 11941 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 11942 .addOperand(Base) 11943 .addOperand(Scale) 11944 .addOperand(Index) 11945 .addDisp(Disp, 8) 11946 .addOperand(Segment) 11947 .setMemRefs(MMOBegin, MMOEnd); 11948 11949 // If we need to align it, do so. Otherwise, just copy the address 11950 // to OverflowDestReg. 11951 if (NeedsAlign) { 11952 // Align the overflow address 11953 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 11954 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 11955 11956 // aligned_addr = (addr + (align-1)) & ~(align-1) 11957 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 11958 .addReg(OverflowAddrReg) 11959 .addImm(Align-1); 11960 11961 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 11962 .addReg(TmpReg) 11963 .addImm(~(uint64_t)(Align-1)); 11964 } else { 11965 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 11966 .addReg(OverflowAddrReg); 11967 } 11968 11969 // Compute the next overflow address after this argument. 11970 // (the overflow address should be kept 8-byte aligned) 11971 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 11972 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 11973 .addReg(OverflowDestReg) 11974 .addImm(ArgSizeA8); 11975 11976 // Store the new overflow address. 11977 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 11978 .addOperand(Base) 11979 .addOperand(Scale) 11980 .addOperand(Index) 11981 .addDisp(Disp, 8) 11982 .addOperand(Segment) 11983 .addReg(NextAddrReg) 11984 .setMemRefs(MMOBegin, MMOEnd); 11985 11986 // If we branched, emit the PHI to the front of endMBB. 11987 if (offsetMBB) { 11988 BuildMI(*endMBB, endMBB->begin(), DL, 11989 TII->get(X86::PHI), DestReg) 11990 .addReg(OffsetDestReg).addMBB(offsetMBB) 11991 .addReg(OverflowDestReg).addMBB(overflowMBB); 11992 } 11993 11994 // Erase the pseudo instruction 11995 MI->eraseFromParent(); 11996 11997 return endMBB; 11998} 11999 12000MachineBasicBlock * 12001X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 12002 MachineInstr *MI, 12003 MachineBasicBlock *MBB) const { 12004 // Emit code to save XMM registers to the stack. The ABI says that the 12005 // number of registers to save is given in %al, so it's theoretically 12006 // possible to do an indirect jump trick to avoid saving all of them, 12007 // however this code takes a simpler approach and just executes all 12008 // of the stores if %al is non-zero. 
It's less code, and it's probably 12009 // easier on the hardware branch predictor, and stores aren't all that 12010 // expensive anyway. 12011 12012 // Create the new basic blocks. One block contains all the XMM stores, 12013 // and one block is the final destination regardless of whether any 12014 // stores were performed. 12015 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 12016 MachineFunction *F = MBB->getParent(); 12017 MachineFunction::iterator MBBIter = MBB; 12018 ++MBBIter; 12019 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 12020 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 12021 F->insert(MBBIter, XMMSaveMBB); 12022 F->insert(MBBIter, EndMBB); 12023 12024 // Transfer the remainder of MBB and its successor edges to EndMBB. 12025 EndMBB->splice(EndMBB->begin(), MBB, 12026 llvm::next(MachineBasicBlock::iterator(MI)), 12027 MBB->end()); 12028 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 12029 12030 // The original block will now fall through to the XMM save block. 12031 MBB->addSuccessor(XMMSaveMBB); 12032 // The XMMSaveMBB will fall through to the end block. 12033 XMMSaveMBB->addSuccessor(EndMBB); 12034 12035 // Now add the instructions. 12036 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12037 DebugLoc DL = MI->getDebugLoc(); 12038 12039 unsigned CountReg = MI->getOperand(0).getReg(); 12040 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 12041 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 12042 12043 if (!Subtarget->isTargetWin64()) { 12044 // If %al is 0, branch around the XMM save block. 12045 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 12046 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 12047 MBB->addSuccessor(EndMBB); 12048 } 12049 12050 unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; 12051 // In the XMM save block, save all the XMM argument registers. 12052 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 12053 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 12054 MachineMemOperand *MMO = 12055 F->getMachineMemOperand( 12056 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 12057 MachineMemOperand::MOStore, 12058 /*Size=*/16, /*Align=*/16); 12059 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 12060 .addFrameIndex(RegSaveFrameIndex) 12061 .addImm(/*Scale=*/1) 12062 .addReg(/*IndexReg=*/0) 12063 .addImm(/*Disp=*/Offset) 12064 .addReg(/*Segment=*/0) 12065 .addReg(MI->getOperand(i).getReg()) 12066 .addMemOperand(MMO); 12067 } 12068 12069 MI->eraseFromParent(); // The pseudo instruction is gone now. 12070 12071 return EndMBB; 12072} 12073 12074MachineBasicBlock * 12075X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 12076 MachineBasicBlock *BB) const { 12077 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12078 DebugLoc DL = MI->getDebugLoc(); 12079 12080 // To "insert" a SELECT_CC instruction, we actually have to insert the 12081 // diamond control-flow pattern. The incoming instruction knows the 12082 // destination vreg to set, the condition code register to branch on, the 12083 // true/false values to select between, and a branch opcode to use. 12084 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 12085 MachineFunction::iterator It = BB; 12086 ++It; 12087 12088 // thisMBB: 12089 // ... 12090 // TrueVal = ... 
12091 // cmpTY ccX, r1, r2 12092 // bCC copy1MBB 12093 // fallthrough --> copy0MBB 12094 MachineBasicBlock *thisMBB = BB; 12095 MachineFunction *F = BB->getParent(); 12096 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 12097 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 12098 F->insert(It, copy0MBB); 12099 F->insert(It, sinkMBB); 12100 12101 // If the EFLAGS register isn't dead in the terminator, then claim that it's 12102 // live into the sink and copy blocks. 12103 if (!MI->killsRegister(X86::EFLAGS)) { 12104 copy0MBB->addLiveIn(X86::EFLAGS); 12105 sinkMBB->addLiveIn(X86::EFLAGS); 12106 } 12107 12108 // Transfer the remainder of BB and its successor edges to sinkMBB. 12109 sinkMBB->splice(sinkMBB->begin(), BB, 12110 llvm::next(MachineBasicBlock::iterator(MI)), 12111 BB->end()); 12112 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 12113 12114 // Add the true and fallthrough blocks as its successors. 12115 BB->addSuccessor(copy0MBB); 12116 BB->addSuccessor(sinkMBB); 12117 12118 // Create the conditional branch instruction. 12119 unsigned Opc = 12120 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 12121 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 12122 12123 // copy0MBB: 12124 // %FalseValue = ... 12125 // # fallthrough to sinkMBB 12126 copy0MBB->addSuccessor(sinkMBB); 12127 12128 // sinkMBB: 12129 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 12130 // ... 12131 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12132 TII->get(X86::PHI), MI->getOperand(0).getReg()) 12133 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 12134 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 12135 12136 MI->eraseFromParent(); // The pseudo instruction is gone now. 12137 return sinkMBB; 12138} 12139 12140MachineBasicBlock * 12141X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 12142 bool Is64Bit) const { 12143 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12144 DebugLoc DL = MI->getDebugLoc(); 12145 MachineFunction *MF = BB->getParent(); 12146 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 12147 12148 assert(getTargetMachine().Options.EnableSegmentedStacks); 12149 12150 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 12151 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 12152 12153 // BB: 12154 // ... [Till the alloca] 12155 // If stacklet is not large enough, jump to mallocMBB 12156 // 12157 // bumpMBB: 12158 // Allocate by subtracting from RSP 12159 // Jump to continueMBB 12160 // 12161 // mallocMBB: 12162 // Allocate by call to runtime 12163 // 12164 // continueMBB: 12165 // ... 12166 // [rest of original BB] 12167 // 12168 12169 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12170 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12171 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12172 12173 MachineRegisterInfo &MRI = MF->getRegInfo(); 12174 const TargetRegisterClass *AddrRegClass = 12175 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 12176 12177 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 12178 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 12179 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 12180 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 12181 sizeVReg = MI->getOperand(1).getReg(), 12182 physSPReg = Is64Bit ? 
X86::RSP : X86::ESP; 12183 12184 MachineFunction::iterator MBBIter = BB; 12185 ++MBBIter; 12186 12187 MF->insert(MBBIter, bumpMBB); 12188 MF->insert(MBBIter, mallocMBB); 12189 MF->insert(MBBIter, continueMBB); 12190 12191 continueMBB->splice(continueMBB->begin(), BB, llvm::next 12192 (MachineBasicBlock::iterator(MI)), BB->end()); 12193 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 12194 12195 // Add code to the main basic block to check if the stack limit has been hit, 12196 // and if so, jump to mallocMBB otherwise to bumpMBB. 12197 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 12198 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 12199 .addReg(tmpSPVReg).addReg(sizeVReg); 12200 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 12201 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 12202 .addReg(SPLimitVReg); 12203 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 12204 12205 // bumpMBB simply decreases the stack pointer, since we know the current 12206 // stacklet has enough space. 12207 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 12208 .addReg(SPLimitVReg); 12209 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 12210 .addReg(SPLimitVReg); 12211 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 12212 12213 // Calls into a routine in libgcc to allocate more space from the heap. 12214 if (Is64Bit) { 12215 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 12216 .addReg(sizeVReg); 12217 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 12218 .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI); 12219 } else { 12220 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 12221 .addImm(12); 12222 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 12223 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 12224 .addExternalSymbol("__morestack_allocate_stack_space"); 12225 } 12226 12227 if (!Is64Bit) 12228 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 12229 .addImm(16); 12230 12231 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 12232 .addReg(Is64Bit ? X86::RAX : X86::EAX); 12233 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 12234 12235 // Set up the CFG correctly. 12236 BB->addSuccessor(bumpMBB); 12237 BB->addSuccessor(mallocMBB); 12238 mallocMBB->addSuccessor(continueMBB); 12239 bumpMBB->addSuccessor(continueMBB); 12240 12241 // Take care of the PHI nodes. 12242 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 12243 MI->getOperand(0).getReg()) 12244 .addReg(mallocPtrVReg).addMBB(mallocMBB) 12245 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 12246 12247 // Delete the original pseudo instruction. 12248 MI->eraseFromParent(); 12249 12250 // And we're done. 12251 return continueMBB; 12252} 12253 12254MachineBasicBlock * 12255X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 12256 MachineBasicBlock *BB) const { 12257 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12258 DebugLoc DL = MI->getDebugLoc(); 12259 12260 assert(!Subtarget->isTargetEnvMacho()); 12261 12262 // The lowering is pretty easy: we're just emitting the call to _alloca. The 12263 // non-trivial part is impdef of ESP. 12264 12265 if (Subtarget->isTargetWin64()) { 12266 if (Subtarget->isTargetCygMing()) { 12267 // ___chkstk(Mingw64): 12268 // Clobbers R10, R11, RAX and EFLAGS. 12269 // Updates RSP. 
12270 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 12271 .addExternalSymbol("___chkstk") 12272 .addReg(X86::RAX, RegState::Implicit) 12273 .addReg(X86::RSP, RegState::Implicit) 12274 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 12275 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 12276 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 12277 } else { 12278 // __chkstk(MSVCRT): does not update stack pointer. 12279 // Clobbers R10, R11 and EFLAGS. 12280 // FIXME: RAX(allocated size) might be reused and not killed. 12281 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 12282 .addExternalSymbol("__chkstk") 12283 .addReg(X86::RAX, RegState::Implicit) 12284 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 12285 // RAX has the offset to subtracted from RSP. 12286 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 12287 .addReg(X86::RSP) 12288 .addReg(X86::RAX); 12289 } 12290 } else { 12291 const char *StackProbeSymbol = 12292 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 12293 12294 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 12295 .addExternalSymbol(StackProbeSymbol) 12296 .addReg(X86::EAX, RegState::Implicit) 12297 .addReg(X86::ESP, RegState::Implicit) 12298 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 12299 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 12300 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 12301 } 12302 12303 MI->eraseFromParent(); // The pseudo instruction is gone now. 12304 return BB; 12305} 12306 12307MachineBasicBlock * 12308X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 12309 MachineBasicBlock *BB) const { 12310 // This is pretty easy. We're taking the value that we received from 12311 // our load from the relocation, sticking it in either RDI (x86-64) 12312 // or EAX and doing an indirect call. The return value will then 12313 // be in the normal return register. 12314 const X86InstrInfo *TII 12315 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 12316 DebugLoc DL = MI->getDebugLoc(); 12317 MachineFunction *F = BB->getParent(); 12318 12319 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 12320 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 12321 12322 if (Subtarget->is64Bit()) { 12323 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12324 TII->get(X86::MOV64rm), X86::RDI) 12325 .addReg(X86::RIP) 12326 .addImm(0).addReg(0) 12327 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12328 MI->getOperand(3).getTargetFlags()) 12329 .addReg(0); 12330 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 12331 addDirectMem(MIB, X86::RDI); 12332 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 12333 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12334 TII->get(X86::MOV32rm), X86::EAX) 12335 .addReg(0) 12336 .addImm(0).addReg(0) 12337 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12338 MI->getOperand(3).getTargetFlags()) 12339 .addReg(0); 12340 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 12341 addDirectMem(MIB, X86::EAX); 12342 } else { 12343 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12344 TII->get(X86::MOV32rm), X86::EAX) 12345 .addReg(TII->getGlobalBaseReg(F)) 12346 .addImm(0).addReg(0) 12347 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12348 MI->getOperand(3).getTargetFlags()) 12349 .addReg(0); 12350 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 12351 addDirectMem(MIB, X86::EAX); 12352 } 12353 12354 MI->eraseFromParent(); // The pseudo instruction is gone now. 
12355 return BB; 12356} 12357 12358MachineBasicBlock * 12359X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 12360 MachineBasicBlock *BB) const { 12361 switch (MI->getOpcode()) { 12362 default: assert(0 && "Unexpected instr type to insert"); 12363 case X86::TAILJMPd64: 12364 case X86::TAILJMPr64: 12365 case X86::TAILJMPm64: 12366 assert(0 && "TAILJMP64 would not be touched here."); 12367 case X86::TCRETURNdi64: 12368 case X86::TCRETURNri64: 12369 case X86::TCRETURNmi64: 12370 // Defs of TCRETURNxx64 has Win64's callee-saved registers, as subset. 12371 // On AMD64, additional defs should be added before register allocation. 12372 if (!Subtarget->isTargetWin64()) { 12373 MI->addRegisterDefined(X86::RSI); 12374 MI->addRegisterDefined(X86::RDI); 12375 MI->addRegisterDefined(X86::XMM6); 12376 MI->addRegisterDefined(X86::XMM7); 12377 MI->addRegisterDefined(X86::XMM8); 12378 MI->addRegisterDefined(X86::XMM9); 12379 MI->addRegisterDefined(X86::XMM10); 12380 MI->addRegisterDefined(X86::XMM11); 12381 MI->addRegisterDefined(X86::XMM12); 12382 MI->addRegisterDefined(X86::XMM13); 12383 MI->addRegisterDefined(X86::XMM14); 12384 MI->addRegisterDefined(X86::XMM15); 12385 } 12386 return BB; 12387 case X86::WIN_ALLOCA: 12388 return EmitLoweredWinAlloca(MI, BB); 12389 case X86::SEG_ALLOCA_32: 12390 return EmitLoweredSegAlloca(MI, BB, false); 12391 case X86::SEG_ALLOCA_64: 12392 return EmitLoweredSegAlloca(MI, BB, true); 12393 case X86::TLSCall_32: 12394 case X86::TLSCall_64: 12395 return EmitLoweredTLSCall(MI, BB); 12396 case X86::CMOV_GR8: 12397 case X86::CMOV_FR32: 12398 case X86::CMOV_FR64: 12399 case X86::CMOV_V4F32: 12400 case X86::CMOV_V2F64: 12401 case X86::CMOV_V2I64: 12402 case X86::CMOV_V8F32: 12403 case X86::CMOV_V4F64: 12404 case X86::CMOV_V4I64: 12405 case X86::CMOV_GR16: 12406 case X86::CMOV_GR32: 12407 case X86::CMOV_RFP32: 12408 case X86::CMOV_RFP64: 12409 case X86::CMOV_RFP80: 12410 return EmitLoweredSelect(MI, BB); 12411 12412 case X86::FP32_TO_INT16_IN_MEM: 12413 case X86::FP32_TO_INT32_IN_MEM: 12414 case X86::FP32_TO_INT64_IN_MEM: 12415 case X86::FP64_TO_INT16_IN_MEM: 12416 case X86::FP64_TO_INT32_IN_MEM: 12417 case X86::FP64_TO_INT64_IN_MEM: 12418 case X86::FP80_TO_INT16_IN_MEM: 12419 case X86::FP80_TO_INT32_IN_MEM: 12420 case X86::FP80_TO_INT64_IN_MEM: { 12421 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12422 DebugLoc DL = MI->getDebugLoc(); 12423 12424 // Change the floating point control register to use "round towards zero" 12425 // mode when truncating to an integer value. 12426 MachineFunction *F = BB->getParent(); 12427 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 12428 addFrameReference(BuildMI(*BB, MI, DL, 12429 TII->get(X86::FNSTCW16m)), CWFrameIdx); 12430 12431 // Load the old value of the high byte of the control word... 12432 unsigned OldCW = 12433 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 12434 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 12435 CWFrameIdx); 12436 12437 // Set the high part to be round to zero... 12438 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 12439 .addImm(0xC7F); 12440 12441 // Reload the modified control word now... 
12442 addFrameReference(BuildMI(*BB, MI, DL, 12443 TII->get(X86::FLDCW16m)), CWFrameIdx); 12444 12445 // Restore the memory image of control word to original value 12446 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 12447 .addReg(OldCW); 12448 12449 // Get the X86 opcode to use. 12450 unsigned Opc; 12451 switch (MI->getOpcode()) { 12452 default: llvm_unreachable("illegal opcode!"); 12453 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 12454 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 12455 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 12456 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 12457 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 12458 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 12459 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 12460 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 12461 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 12462 } 12463 12464 X86AddressMode AM; 12465 MachineOperand &Op = MI->getOperand(0); 12466 if (Op.isReg()) { 12467 AM.BaseType = X86AddressMode::RegBase; 12468 AM.Base.Reg = Op.getReg(); 12469 } else { 12470 AM.BaseType = X86AddressMode::FrameIndexBase; 12471 AM.Base.FrameIndex = Op.getIndex(); 12472 } 12473 Op = MI->getOperand(1); 12474 if (Op.isImm()) 12475 AM.Scale = Op.getImm(); 12476 Op = MI->getOperand(2); 12477 if (Op.isImm()) 12478 AM.IndexReg = Op.getImm(); 12479 Op = MI->getOperand(3); 12480 if (Op.isGlobal()) { 12481 AM.GV = Op.getGlobal(); 12482 } else { 12483 AM.Disp = Op.getImm(); 12484 } 12485 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 12486 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 12487 12488 // Reload the original control word now. 12489 addFrameReference(BuildMI(*BB, MI, DL, 12490 TII->get(X86::FLDCW16m)), CWFrameIdx); 12491 12492 MI->eraseFromParent(); // The pseudo instruction is gone now. 12493 return BB; 12494 } 12495 // String/text processing lowering. 12496 case X86::PCMPISTRM128REG: 12497 case X86::VPCMPISTRM128REG: 12498 return EmitPCMP(MI, BB, 3, false /* in-mem */); 12499 case X86::PCMPISTRM128MEM: 12500 case X86::VPCMPISTRM128MEM: 12501 return EmitPCMP(MI, BB, 3, true /* in-mem */); 12502 case X86::PCMPESTRM128REG: 12503 case X86::VPCMPESTRM128REG: 12504 return EmitPCMP(MI, BB, 5, false /* in mem */); 12505 case X86::PCMPESTRM128MEM: 12506 case X86::VPCMPESTRM128MEM: 12507 return EmitPCMP(MI, BB, 5, true /* in mem */); 12508 12509 // Thread synchronization. 12510 case X86::MONITOR: 12511 return EmitMonitor(MI, BB); 12512 case X86::MWAIT: 12513 return EmitMwait(MI, BB); 12514 12515 // Atomic Lowering. 
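  // Each ATOM* pseudo below is expanded by its custom-inserter helper into a
  // load / operate / LCMPXCHG retry loop. The operands passed in are the
  // register and immediate forms of the ALU opcode, the load opcode for the
  // old value, the LCMPXCHG opcode, the NOT opcode, the accumulator register,
  // the register class, and (for the NAND variants) an invert-result flag;
  // the min/max forms instead pass the CMOV opcode used to pick the result.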
12516 case X86::ATOMAND32: 12517 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 12518 X86::AND32ri, X86::MOV32rm, 12519 X86::LCMPXCHG32, 12520 X86::NOT32r, X86::EAX, 12521 X86::GR32RegisterClass); 12522 case X86::ATOMOR32: 12523 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 12524 X86::OR32ri, X86::MOV32rm, 12525 X86::LCMPXCHG32, 12526 X86::NOT32r, X86::EAX, 12527 X86::GR32RegisterClass); 12528 case X86::ATOMXOR32: 12529 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 12530 X86::XOR32ri, X86::MOV32rm, 12531 X86::LCMPXCHG32, 12532 X86::NOT32r, X86::EAX, 12533 X86::GR32RegisterClass); 12534 case X86::ATOMNAND32: 12535 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 12536 X86::AND32ri, X86::MOV32rm, 12537 X86::LCMPXCHG32, 12538 X86::NOT32r, X86::EAX, 12539 X86::GR32RegisterClass, true); 12540 case X86::ATOMMIN32: 12541 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 12542 case X86::ATOMMAX32: 12543 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 12544 case X86::ATOMUMIN32: 12545 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 12546 case X86::ATOMUMAX32: 12547 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 12548 12549 case X86::ATOMAND16: 12550 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 12551 X86::AND16ri, X86::MOV16rm, 12552 X86::LCMPXCHG16, 12553 X86::NOT16r, X86::AX, 12554 X86::GR16RegisterClass); 12555 case X86::ATOMOR16: 12556 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 12557 X86::OR16ri, X86::MOV16rm, 12558 X86::LCMPXCHG16, 12559 X86::NOT16r, X86::AX, 12560 X86::GR16RegisterClass); 12561 case X86::ATOMXOR16: 12562 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, 12563 X86::XOR16ri, X86::MOV16rm, 12564 X86::LCMPXCHG16, 12565 X86::NOT16r, X86::AX, 12566 X86::GR16RegisterClass); 12567 case X86::ATOMNAND16: 12568 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 12569 X86::AND16ri, X86::MOV16rm, 12570 X86::LCMPXCHG16, 12571 X86::NOT16r, X86::AX, 12572 X86::GR16RegisterClass, true); 12573 case X86::ATOMMIN16: 12574 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); 12575 case X86::ATOMMAX16: 12576 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); 12577 case X86::ATOMUMIN16: 12578 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); 12579 case X86::ATOMUMAX16: 12580 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); 12581 12582 case X86::ATOMAND8: 12583 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 12584 X86::AND8ri, X86::MOV8rm, 12585 X86::LCMPXCHG8, 12586 X86::NOT8r, X86::AL, 12587 X86::GR8RegisterClass); 12588 case X86::ATOMOR8: 12589 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 12590 X86::OR8ri, X86::MOV8rm, 12591 X86::LCMPXCHG8, 12592 X86::NOT8r, X86::AL, 12593 X86::GR8RegisterClass); 12594 case X86::ATOMXOR8: 12595 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, 12596 X86::XOR8ri, X86::MOV8rm, 12597 X86::LCMPXCHG8, 12598 X86::NOT8r, X86::AL, 12599 X86::GR8RegisterClass); 12600 case X86::ATOMNAND8: 12601 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 12602 X86::AND8ri, X86::MOV8rm, 12603 X86::LCMPXCHG8, 12604 X86::NOT8r, X86::AL, 12605 X86::GR8RegisterClass, true); 12606 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. 12607 // This group is for 64-bit host. 
12608 case X86::ATOMAND64: 12609 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 12610 X86::AND64ri32, X86::MOV64rm, 12611 X86::LCMPXCHG64, 12612 X86::NOT64r, X86::RAX, 12613 X86::GR64RegisterClass); 12614 case X86::ATOMOR64: 12615 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 12616 X86::OR64ri32, X86::MOV64rm, 12617 X86::LCMPXCHG64, 12618 X86::NOT64r, X86::RAX, 12619 X86::GR64RegisterClass); 12620 case X86::ATOMXOR64: 12621 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, 12622 X86::XOR64ri32, X86::MOV64rm, 12623 X86::LCMPXCHG64, 12624 X86::NOT64r, X86::RAX, 12625 X86::GR64RegisterClass); 12626 case X86::ATOMNAND64: 12627 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 12628 X86::AND64ri32, X86::MOV64rm, 12629 X86::LCMPXCHG64, 12630 X86::NOT64r, X86::RAX, 12631 X86::GR64RegisterClass, true); 12632 case X86::ATOMMIN64: 12633 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); 12634 case X86::ATOMMAX64: 12635 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); 12636 case X86::ATOMUMIN64: 12637 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); 12638 case X86::ATOMUMAX64: 12639 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); 12640 12641 // This group does 64-bit operations on a 32-bit host. 12642 case X86::ATOMAND6432: 12643 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12644 X86::AND32rr, X86::AND32rr, 12645 X86::AND32ri, X86::AND32ri, 12646 false); 12647 case X86::ATOMOR6432: 12648 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12649 X86::OR32rr, X86::OR32rr, 12650 X86::OR32ri, X86::OR32ri, 12651 false); 12652 case X86::ATOMXOR6432: 12653 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12654 X86::XOR32rr, X86::XOR32rr, 12655 X86::XOR32ri, X86::XOR32ri, 12656 false); 12657 case X86::ATOMNAND6432: 12658 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12659 X86::AND32rr, X86::AND32rr, 12660 X86::AND32ri, X86::AND32ri, 12661 true); 12662 case X86::ATOMADD6432: 12663 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12664 X86::ADD32rr, X86::ADC32rr, 12665 X86::ADD32ri, X86::ADC32ri, 12666 false); 12667 case X86::ATOMSUB6432: 12668 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12669 X86::SUB32rr, X86::SBB32rr, 12670 X86::SUB32ri, X86::SBB32ri, 12671 false); 12672 case X86::ATOMSWAP6432: 12673 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12674 X86::MOV32rr, X86::MOV32rr, 12675 X86::MOV32ri, X86::MOV32ri, 12676 false); 12677 case X86::VASTART_SAVE_XMM_REGS: 12678 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 12679 12680 case X86::VAARG_64: 12681 return EmitVAARG64WithCustomInserter(MI, BB); 12682 } 12683} 12684 12685//===----------------------------------------------------------------------===// 12686// X86 Optimization Hooks 12687//===----------------------------------------------------------------------===// 12688 12689void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 12690 const APInt &Mask, 12691 APInt &KnownZero, 12692 APInt &KnownOne, 12693 const SelectionDAG &DAG, 12694 unsigned Depth) const { 12695 unsigned Opc = Op.getOpcode(); 12696 assert((Opc >= ISD::BUILTIN_OP_END || 12697 Opc == ISD::INTRINSIC_WO_CHAIN || 12698 Opc == ISD::INTRINSIC_W_CHAIN || 12699 Opc == ISD::INTRINSIC_VOID) && 12700 "Should use MaskedValueIsZero if you don't know whether Op" 12701 " is a target node!"); 12702 12703 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 
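  // For example, X86ISD::SETCC produces a value that is either 0 or 1, so all
  // bits above bit 0 of that result are reported as known zero below.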
12704 switch (Opc) { 12705 default: break; 12706 case X86ISD::ADD: 12707 case X86ISD::SUB: 12708 case X86ISD::ADC: 12709 case X86ISD::SBB: 12710 case X86ISD::SMUL: 12711 case X86ISD::UMUL: 12712 case X86ISD::INC: 12713 case X86ISD::DEC: 12714 case X86ISD::OR: 12715 case X86ISD::XOR: 12716 case X86ISD::AND: 12717 // These nodes' second result is a boolean. 12718 if (Op.getResNo() == 0) 12719 break; 12720 // Fallthrough 12721 case X86ISD::SETCC: 12722 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 12723 Mask.getBitWidth() - 1); 12724 break; 12725 case ISD::INTRINSIC_WO_CHAIN: { 12726 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 12727 unsigned NumLoBits = 0; 12728 switch (IntId) { 12729 default: break; 12730 case Intrinsic::x86_sse_movmsk_ps: 12731 case Intrinsic::x86_avx_movmsk_ps_256: 12732 case Intrinsic::x86_sse2_movmsk_pd: 12733 case Intrinsic::x86_avx_movmsk_pd_256: 12734 case Intrinsic::x86_mmx_pmovmskb: 12735 case Intrinsic::x86_sse2_pmovmskb_128: 12736 case Intrinsic::x86_avx2_pmovmskb: { 12737 // High bits of movmskp{s|d}, pmovmskb are known zero. 12738 switch (IntId) { 12739 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 12740 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 12741 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 12742 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 12743 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 12744 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 12745 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 12746 } 12747 KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(), 12748 Mask.getBitWidth() - NumLoBits); 12749 break; 12750 } 12751 } 12752 break; 12753 } 12754 } 12755} 12756 12757unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 12758 unsigned Depth) const { 12759 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 12760 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 12761 return Op.getValueType().getScalarType().getSizeInBits(); 12762 12763 // Fallback case. 12764 return 1; 12765} 12766 12767/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 12768/// node is a GlobalAddress + offset. 
12769bool X86TargetLowering::isGAPlusOffset(SDNode *N, 12770 const GlobalValue* &GA, 12771 int64_t &Offset) const { 12772 if (N->getOpcode() == X86ISD::Wrapper) { 12773 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 12774 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 12775 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 12776 return true; 12777 } 12778 } 12779 return TargetLowering::isGAPlusOffset(N, GA, Offset); 12780} 12781 12782/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 12783/// same as extracting the high 128-bit part of 256-bit vector and then 12784/// inserting the result into the low part of a new 256-bit vector 12785static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 12786 EVT VT = SVOp->getValueType(0); 12787 int NumElems = VT.getVectorNumElements(); 12788 12789 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12790 for (int i = 0, j = NumElems/2; i < NumElems/2; ++i, ++j) 12791 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12792 SVOp->getMaskElt(j) >= 0) 12793 return false; 12794 12795 return true; 12796} 12797 12798/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 12799/// same as extracting the low 128-bit part of 256-bit vector and then 12800/// inserting the result into the high part of a new 256-bit vector 12801static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 12802 EVT VT = SVOp->getValueType(0); 12803 int NumElems = VT.getVectorNumElements(); 12804 12805 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12806 for (int i = NumElems/2, j = 0; i < NumElems; ++i, ++j) 12807 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12808 SVOp->getMaskElt(j) >= 0) 12809 return false; 12810 12811 return true; 12812} 12813 12814/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 12815static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 12816 TargetLowering::DAGCombinerInfo &DCI, 12817 bool HasAVX2) { 12818 DebugLoc dl = N->getDebugLoc(); 12819 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 12820 SDValue V1 = SVOp->getOperand(0); 12821 SDValue V2 = SVOp->getOperand(1); 12822 EVT VT = SVOp->getValueType(0); 12823 int NumElems = VT.getVectorNumElements(); 12824 12825 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 12826 V2.getOpcode() == ISD::CONCAT_VECTORS) { 12827 // 12828 // 0,0,0,... 12829 // | 12830 // V UNDEF BUILD_VECTOR UNDEF 12831 // \ / \ / 12832 // CONCAT_VECTOR CONCAT_VECTOR 12833 // \ / 12834 // \ / 12835 // RESULT: V + zero extended 12836 // 12837 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 12838 V2.getOperand(1).getOpcode() != ISD::UNDEF || 12839 V1.getOperand(1).getOpcode() != ISD::UNDEF) 12840 return SDValue(); 12841 12842 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 12843 return SDValue(); 12844 12845 // To match the shuffle mask, the first half of the mask should 12846 // be exactly the first vector, and all the rest a splat with the 12847 // first element of the second one. 12848 for (int i = 0; i < NumElems/2; ++i) 12849 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 12850 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 12851 return SDValue(); 12852 12853 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
12854 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 12855 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 12856 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 12857 SDValue ResNode = 12858 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, 12859 Ld->getMemoryVT(), 12860 Ld->getPointerInfo(), 12861 Ld->getAlignment(), 12862 false/*isVolatile*/, true/*ReadMem*/, 12863 false/*WriteMem*/); 12864 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 12865 } 12866 12867 // Emit a zeroed vector and insert the desired subvector on its 12868 // first half. 12869 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, HasAVX2, DAG, dl); 12870 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 12871 DAG.getConstant(0, MVT::i32), DAG, dl); 12872 return DCI.CombineTo(N, InsV); 12873 } 12874 12875 //===--------------------------------------------------------------------===// 12876 // Combine some shuffles into subvector extracts and inserts: 12877 // 12878 12879 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12880 if (isShuffleHigh128VectorInsertLow(SVOp)) { 12881 SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32), 12882 DAG, dl); 12883 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12884 V, DAG.getConstant(0, MVT::i32), DAG, dl); 12885 return DCI.CombineTo(N, InsV); 12886 } 12887 12888 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12889 if (isShuffleLow128VectorInsertHigh(SVOp)) { 12890 SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl); 12891 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12892 V, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); 12893 return DCI.CombineTo(N, InsV); 12894 } 12895 12896 return SDValue(); 12897} 12898 12899/// PerformShuffleCombine - Performs several different shuffle combines. 12900static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 12901 TargetLowering::DAGCombinerInfo &DCI, 12902 const X86Subtarget *Subtarget) { 12903 DebugLoc dl = N->getDebugLoc(); 12904 EVT VT = N->getValueType(0); 12905 12906 // Don't create instructions with illegal types after legalize types has run. 12907 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12908 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 12909 return SDValue(); 12910 12911 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 12912 if (Subtarget->hasAVX() && VT.getSizeInBits() == 256 && 12913 N->getOpcode() == ISD::VECTOR_SHUFFLE) 12914 return PerformShuffleCombine256(N, DAG, DCI, Subtarget->hasAVX2()); 12915 12916 // Only handle 128 wide vector from here on. 12917 if (VT.getSizeInBits() != 128) 12918 return SDValue(); 12919 12920 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 12921 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 12922 // consecutive, non-overlapping, and in the right order. 12923 SmallVector<SDValue, 16> Elts; 12924 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 12925 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 12926 12927 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 12928} 12929 12930/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 12931/// generation and convert it from being a bunch of shuffles and extracts 12932/// to a simple store and scalar loads to extract the elements. 
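/// Concretely: a v4i32 vector whose only uses are four extracts that each feed
/// a single sign- or zero-extend is spilled to a stack temporary, and each
/// extract is rewritten as an i32 load from the matching slot offset.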
12933static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 12934 const TargetLowering &TLI) { 12935 SDValue InputVector = N->getOperand(0); 12936 12937 // Only operate on vectors of 4 elements, where the alternative shuffling 12938 // gets to be more expensive. 12939 if (InputVector.getValueType() != MVT::v4i32) 12940 return SDValue(); 12941 12942 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 12943 // single use which is a sign-extend or zero-extend, and all elements are 12944 // used. 12945 SmallVector<SDNode *, 4> Uses; 12946 unsigned ExtractedElements = 0; 12947 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 12948 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 12949 if (UI.getUse().getResNo() != InputVector.getResNo()) 12950 return SDValue(); 12951 12952 SDNode *Extract = *UI; 12953 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 12954 return SDValue(); 12955 12956 if (Extract->getValueType(0) != MVT::i32) 12957 return SDValue(); 12958 if (!Extract->hasOneUse()) 12959 return SDValue(); 12960 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 12961 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 12962 return SDValue(); 12963 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 12964 return SDValue(); 12965 12966 // Record which element was extracted. 12967 ExtractedElements |= 12968 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 12969 12970 Uses.push_back(Extract); 12971 } 12972 12973 // If not all the elements were used, this may not be worthwhile. 12974 if (ExtractedElements != 15) 12975 return SDValue(); 12976 12977 // Ok, we've now decided to do the transformation. 12978 DebugLoc dl = InputVector.getDebugLoc(); 12979 12980 // Store the value to a temporary stack slot. 12981 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 12982 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 12983 MachinePointerInfo(), false, false, 0); 12984 12985 // Replace each use (extract) with a load of the appropriate element. 12986 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 12987 UE = Uses.end(); UI != UE; ++UI) { 12988 SDNode *Extract = *UI; 12989 12990 // Compute the element's address. 12991 SDValue Idx = Extract->getOperand(1); 12992 unsigned EltSize = 12993 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 12994 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 12995 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 12996 12997 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), 12998 StackPtr, OffsetVal); 12999 13000 // Load the scalar. 13001 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 13002 ScalarAddr, MachinePointerInfo(), 13003 false, false, false, 0); 13004 13005 // Replace the extract with the load. 13006 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 13007 } 13008 13009 // The replacement was made in place; don't return anything. 13010 return SDValue(); 13011} 13012 13013/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 13014/// nodes. 13015static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 13016 TargetLowering::DAGCombinerInfo &DCI, 13017 const X86Subtarget *Subtarget) { 13018 DebugLoc DL = N->getDebugLoc(); 13019 SDValue Cond = N->getOperand(0); 13020 // Get the LHS/RHS of the select.
13021 SDValue LHS = N->getOperand(1); 13022 SDValue RHS = N->getOperand(2); 13023 EVT VT = LHS.getValueType(); 13024 13025 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 13026 // instructions match the semantics of the common C idiom x<y?x:y but not 13027 // x<=y?x:y, because of how they handle negative zero (which can be 13028 // ignored in unsafe-math mode). 13029 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 13030 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 13031 (Subtarget->hasSSE2() || 13032 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 13033 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 13034 13035 unsigned Opcode = 0; 13036 // Check for x CC y ? x : y. 13037 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 13038 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 13039 switch (CC) { 13040 default: break; 13041 case ISD::SETULT: 13042 // Converting this to a min would handle NaNs incorrectly, and swapping 13043 // the operands would cause it to handle comparisons between positive 13044 // and negative zero incorrectly. 13045 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 13046 if (!DAG.getTarget().Options.UnsafeFPMath && 13047 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 13048 break; 13049 std::swap(LHS, RHS); 13050 } 13051 Opcode = X86ISD::FMIN; 13052 break; 13053 case ISD::SETOLE: 13054 // Converting this to a min would handle comparisons between positive 13055 // and negative zero incorrectly. 13056 if (!DAG.getTarget().Options.UnsafeFPMath && 13057 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 13058 break; 13059 Opcode = X86ISD::FMIN; 13060 break; 13061 case ISD::SETULE: 13062 // Converting this to a min would handle both negative zeros and NaNs 13063 // incorrectly, but we can swap the operands to fix both. 13064 std::swap(LHS, RHS); 13065 case ISD::SETOLT: 13066 case ISD::SETLT: 13067 case ISD::SETLE: 13068 Opcode = X86ISD::FMIN; 13069 break; 13070 13071 case ISD::SETOGE: 13072 // Converting this to a max would handle comparisons between positive 13073 // and negative zero incorrectly. 13074 if (!DAG.getTarget().Options.UnsafeFPMath && 13075 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 13076 break; 13077 Opcode = X86ISD::FMAX; 13078 break; 13079 case ISD::SETUGT: 13080 // Converting this to a max would handle NaNs incorrectly, and swapping 13081 // the operands would cause it to handle comparisons between positive 13082 // and negative zero incorrectly. 13083 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 13084 if (!DAG.getTarget().Options.UnsafeFPMath && 13085 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 13086 break; 13087 std::swap(LHS, RHS); 13088 } 13089 Opcode = X86ISD::FMAX; 13090 break; 13091 case ISD::SETUGE: 13092 // Converting this to a max would handle both negative zeros and NaNs 13093 // incorrectly, but we can swap the operands to fix both. 13094 std::swap(LHS, RHS); 13095 case ISD::SETOGT: 13096 case ISD::SETGT: 13097 case ISD::SETGE: 13098 Opcode = X86ISD::FMAX; 13099 break; 13100 } 13101 // Check for x CC y ? y : x -- a min/max with reversed arms. 
13102 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 13103 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 13104 switch (CC) { 13105 default: break; 13106 case ISD::SETOGE: 13107 // Converting this to a min would handle comparisons between positive 13108 // and negative zero incorrectly, and swapping the operands would 13109 // cause it to handle NaNs incorrectly. 13110 if (!DAG.getTarget().Options.UnsafeFPMath && 13111 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 13112 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13113 break; 13114 std::swap(LHS, RHS); 13115 } 13116 Opcode = X86ISD::FMIN; 13117 break; 13118 case ISD::SETUGT: 13119 // Converting this to a min would handle NaNs incorrectly. 13120 if (!DAG.getTarget().Options.UnsafeFPMath && 13121 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 13122 break; 13123 Opcode = X86ISD::FMIN; 13124 break; 13125 case ISD::SETUGE: 13126 // Converting this to a min would handle both negative zeros and NaNs 13127 // incorrectly, but we can swap the operands to fix both. 13128 std::swap(LHS, RHS); 13129 case ISD::SETOGT: 13130 case ISD::SETGT: 13131 case ISD::SETGE: 13132 Opcode = X86ISD::FMIN; 13133 break; 13134 13135 case ISD::SETULT: 13136 // Converting this to a max would handle NaNs incorrectly. 13137 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13138 break; 13139 Opcode = X86ISD::FMAX; 13140 break; 13141 case ISD::SETOLE: 13142 // Converting this to a max would handle comparisons between positive 13143 // and negative zero incorrectly, and swapping the operands would 13144 // cause it to handle NaNs incorrectly. 13145 if (!DAG.getTarget().Options.UnsafeFPMath && 13146 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 13147 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13148 break; 13149 std::swap(LHS, RHS); 13150 } 13151 Opcode = X86ISD::FMAX; 13152 break; 13153 case ISD::SETULE: 13154 // Converting this to a max would handle both negative zeros and NaNs 13155 // incorrectly, but we can swap the operands to fix both. 13156 std::swap(LHS, RHS); 13157 case ISD::SETOLT: 13158 case ISD::SETLT: 13159 case ISD::SETLE: 13160 Opcode = X86ISD::FMAX; 13161 break; 13162 } 13163 } 13164 13165 if (Opcode) 13166 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 13167 } 13168 13169 // If this is a select between two integer constants, try to do some 13170 // optimizations. 13171 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 13172 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 13173 // Don't do this for crazy integer types. 13174 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 13175 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 13176 // so that TrueC (the true value) is larger than FalseC. 13177 bool NeedsCondInvert = false; 13178 13179 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 13180 // Efficiently invertible. 13181 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 13182 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 13183 isa<ConstantSDNode>(Cond.getOperand(1))))) { 13184 NeedsCondInvert = true; 13185 std::swap(TrueC, FalseC); 13186 } 13187 13188 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 13189 if (FalseC->getAPIntValue() == 0 && 13190 TrueC->getAPIntValue().isPowerOf2()) { 13191 if (NeedsCondInvert) // Invert the condition if needed. 
13192 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13193 DAG.getConstant(1, Cond.getValueType())); 13194 13195 // Zero extend the condition if needed. 13196 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 13197 13198 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 13199 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 13200 DAG.getConstant(ShAmt, MVT::i8)); 13201 } 13202 13203 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. 13204 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 13205 if (NeedsCondInvert) // Invert the condition if needed. 13206 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13207 DAG.getConstant(1, Cond.getValueType())); 13208 13209 // Zero extend the condition if needed. 13210 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 13211 FalseC->getValueType(0), Cond); 13212 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13213 SDValue(FalseC, 0)); 13214 } 13215 13216 // Optimize cases that will turn into an LEA instruction. This requires 13217 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 13218 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 13219 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 13220 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 13221 13222 bool isFastMultiplier = false; 13223 if (Diff < 10) { 13224 switch ((unsigned char)Diff) { 13225 default: break; 13226 case 1: // result = add base, cond 13227 case 2: // result = lea base( , cond*2) 13228 case 3: // result = lea base(cond, cond*2) 13229 case 4: // result = lea base( , cond*4) 13230 case 5: // result = lea base(cond, cond*4) 13231 case 8: // result = lea base( , cond*8) 13232 case 9: // result = lea base(cond, cond*8) 13233 isFastMultiplier = true; 13234 break; 13235 } 13236 } 13237 13238 if (isFastMultiplier) { 13239 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 13240 if (NeedsCondInvert) // Invert the condition if needed. 13241 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13242 DAG.getConstant(1, Cond.getValueType())); 13243 13244 // Zero extend the condition if needed. 13245 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 13246 Cond); 13247 // Scale the condition by the difference. 13248 if (Diff != 1) 13249 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 13250 DAG.getConstant(Diff, Cond.getValueType())); 13251 13252 // Add the base if non-zero. 13253 if (FalseC->getAPIntValue() != 0) 13254 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13255 SDValue(FalseC, 0)); 13256 return Cond; 13257 } 13258 } 13259 } 13260 } 13261 13262 // Canonicalize max and min: 13263 // (x > y) ? x : y -> (x >= y) ? x : y 13264 // (x < y) ? x : y -> (x <= y) ? x : y 13265 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates 13266 // the need for an extra compare 13267 // against zero. e.g. 13268 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) :
0 13269 // subl %esi, %edi 13270 // testl %edi, %edi 13271 // movl $0, %eax 13272 // cmovgl %edi, %eax 13273 // => 13274 // xorl %eax, %eax 13275 // subl %esi, $edi 13276 // cmovsl %eax, %edi 13277 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC && 13278 DAG.isEqualTo(LHS, Cond.getOperand(0)) && 13279 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 13280 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 13281 switch (CC) { 13282 default: break; 13283 case ISD::SETLT: 13284 case ISD::SETGT: { 13285 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; 13286 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), 13287 Cond.getOperand(0), Cond.getOperand(1), NewCC); 13288 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); 13289 } 13290 } 13291 } 13292 13293 // If we know that this node is legal then we know that it is going to be 13294 // matched by one of the SSE/AVX BLEND instructions. These instructions only 13295 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 13296 // to simplify previous instructions. 13297 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13298 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 13299 !DCI.isBeforeLegalize() && 13300 TLI.isOperationLegal(ISD::VSELECT, VT)) { 13301 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 13302 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 13303 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 13304 13305 APInt KnownZero, KnownOne; 13306 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 13307 DCI.isBeforeLegalizeOps()); 13308 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 13309 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 13310 DCI.CommitTargetLoweringOpt(TLO); 13311 } 13312 13313 return SDValue(); 13314} 13315 13316/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 13317static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 13318 TargetLowering::DAGCombinerInfo &DCI) { 13319 DebugLoc DL = N->getDebugLoc(); 13320 13321 // If the flag operand isn't dead, don't touch this CMOV. 13322 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 13323 return SDValue(); 13324 13325 SDValue FalseOp = N->getOperand(0); 13326 SDValue TrueOp = N->getOperand(1); 13327 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 13328 SDValue Cond = N->getOperand(3); 13329 if (CC == X86::COND_E || CC == X86::COND_NE) { 13330 switch (Cond.getOpcode()) { 13331 default: break; 13332 case X86ISD::BSR: 13333 case X86ISD::BSF: 13334 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 13335 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 13336 return (CC == X86::COND_E) ? FalseOp : TrueOp; 13337 } 13338 } 13339 13340 // If this is a select between two integer constants, try to do some 13341 // optimizations. Note that the operands are ordered the opposite of SELECT 13342 // operands. 13343 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 13344 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 13345 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 13346 // larger than FalseC (the false value). 13347 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 13348 CC = X86::GetOppositeBranchCondition(CC); 13349 std::swap(TrueC, FalseC); 13350 } 13351 13352 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 
13353 // This is efficient for any integer data type (including i8/i16) and 13354 // shift amount. 13355 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 13356 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 13357 DAG.getConstant(CC, MVT::i8), Cond); 13358 13359 // Zero extend the condition if needed. 13360 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 13361 13362 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 13363 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 13364 DAG.getConstant(ShAmt, MVT::i8)); 13365 if (N->getNumValues() == 2) // Dead flag value? 13366 return DCI.CombineTo(N, Cond, SDValue()); 13367 return Cond; 13368 } 13369 13370 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 13371 // for any integer data type, including i8/i16. 13372 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 13373 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 13374 DAG.getConstant(CC, MVT::i8), Cond); 13375 13376 // Zero extend the condition if needed. 13377 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 13378 FalseC->getValueType(0), Cond); 13379 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13380 SDValue(FalseC, 0)); 13381 13382 if (N->getNumValues() == 2) // Dead flag value? 13383 return DCI.CombineTo(N, Cond, SDValue()); 13384 return Cond; 13385 } 13386 13387 // Optimize cases that will turn into an LEA instruction. This requires 13388 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 13389 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 13390 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 13391 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 13392 13393 bool isFastMultiplier = false; 13394 if (Diff < 10) { 13395 switch ((unsigned char)Diff) { 13396 default: break; 13397 case 1: // result = add base, cond 13398 case 2: // result = lea base( , cond*2) 13399 case 3: // result = lea base(cond, cond*2) 13400 case 4: // result = lea base( , cond*4) 13401 case 5: // result = lea base(cond, cond*4) 13402 case 8: // result = lea base( , cond*8) 13403 case 9: // result = lea base(cond, cond*8) 13404 isFastMultiplier = true; 13405 break; 13406 } 13407 } 13408 13409 if (isFastMultiplier) { 13410 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 13411 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 13412 DAG.getConstant(CC, MVT::i8), Cond); 13413 // Zero extend the condition if needed. 13414 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 13415 Cond); 13416 // Scale the condition by the difference. 13417 if (Diff != 1) 13418 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 13419 DAG.getConstant(Diff, Cond.getValueType())); 13420 13421 // Add the base if non-zero. 13422 if (FalseC->getAPIntValue() != 0) 13423 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13424 SDValue(FalseC, 0)); 13425 if (N->getNumValues() == 2) // Dead flag value? 13426 return DCI.CombineTo(N, Cond, SDValue()); 13427 return Cond; 13428 } 13429 } 13430 } 13431 } 13432 return SDValue(); 13433} 13434 13435 13436/// PerformMulCombine - Optimize a single multiply with constant into two 13437/// in order to implement it with two cheaper instructions, e.g. 13438/// LEA + SHL, LEA + LEA. 
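/// For example, a multiply by 45 can be rewritten as 9 * 5 (two LEA-style
/// multiplies), and a multiply by 40 as 5 * 8 (an LEA-style multiply plus a
/// shift).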
13439static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 13440 TargetLowering::DAGCombinerInfo &DCI) { 13441 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 13442 return SDValue(); 13443 13444 EVT VT = N->getValueType(0); 13445 if (VT != MVT::i64) 13446 return SDValue(); 13447 13448 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 13449 if (!C) 13450 return SDValue(); 13451 uint64_t MulAmt = C->getZExtValue(); 13452 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 13453 return SDValue(); 13454 13455 uint64_t MulAmt1 = 0; 13456 uint64_t MulAmt2 = 0; 13457 if ((MulAmt % 9) == 0) { 13458 MulAmt1 = 9; 13459 MulAmt2 = MulAmt / 9; 13460 } else if ((MulAmt % 5) == 0) { 13461 MulAmt1 = 5; 13462 MulAmt2 = MulAmt / 5; 13463 } else if ((MulAmt % 3) == 0) { 13464 MulAmt1 = 3; 13465 MulAmt2 = MulAmt / 3; 13466 } 13467 if (MulAmt2 && 13468 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 13469 DebugLoc DL = N->getDebugLoc(); 13470 13471 if (isPowerOf2_64(MulAmt2) && 13472 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 13473 // If second multiplifer is pow2, issue it first. We want the multiply by 13474 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 13475 // is an add. 13476 std::swap(MulAmt1, MulAmt2); 13477 13478 SDValue NewMul; 13479 if (isPowerOf2_64(MulAmt1)) 13480 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 13481 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 13482 else 13483 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 13484 DAG.getConstant(MulAmt1, VT)); 13485 13486 if (isPowerOf2_64(MulAmt2)) 13487 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 13488 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 13489 else 13490 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 13491 DAG.getConstant(MulAmt2, VT)); 13492 13493 // Do not add new nodes to DAG combiner worklist. 13494 DCI.CombineTo(N, NewMul, false); 13495 } 13496 return SDValue(); 13497} 13498 13499static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 13500 SDValue N0 = N->getOperand(0); 13501 SDValue N1 = N->getOperand(1); 13502 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 13503 EVT VT = N0.getValueType(); 13504 13505 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 13506 // since the result of setcc_c is all zero's or all ones. 13507 if (VT.isInteger() && !VT.isVector() && 13508 N1C && N0.getOpcode() == ISD::AND && 13509 N0.getOperand(1).getOpcode() == ISD::Constant) { 13510 SDValue N00 = N0.getOperand(0); 13511 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 13512 ((N00.getOpcode() == ISD::ANY_EXTEND || 13513 N00.getOpcode() == ISD::ZERO_EXTEND) && 13514 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 13515 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 13516 APInt ShAmt = N1C->getAPIntValue(); 13517 Mask = Mask.shl(ShAmt); 13518 if (Mask != 0) 13519 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 13520 N00, DAG.getConstant(Mask, VT)); 13521 } 13522 } 13523 13524 13525 // Hardware support for vector shifts is sparse which makes us scalarize the 13526 // vector operations in many cases. Also, on sandybridge ADD is faster than 13527 // shl. 13528 // (shl V, 1) -> add V,V 13529 if (isSplatVector(N1.getNode())) { 13530 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 13531 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 13532 // We shift all of the values by one. 
In many cases we do not have 13533 // hardware support for this operation. This is better expressed as an ADD 13534 // of two values. 13535 if (N1C && (1 == N1C->getZExtValue())) { 13536 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); 13537 } 13538 } 13539 13540 return SDValue(); 13541} 13542 13543/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 13544/// when possible. 13545static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 13546 const X86Subtarget *Subtarget) { 13547 EVT VT = N->getValueType(0); 13548 if (N->getOpcode() == ISD::SHL) { 13549 SDValue V = PerformSHLCombine(N, DAG); 13550 if (V.getNode()) return V; 13551 } 13552 13553 // On X86 with SSE2 support, we can transform this to a vector shift if 13554 // all elements are shifted by the same amount. We can't do this in legalize 13555 // because the a constant vector is typically transformed to a constant pool 13556 // so we have no knowledge of the shift amount. 13557 if (!Subtarget->hasSSE2()) 13558 return SDValue(); 13559 13560 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 13561 (!Subtarget->hasAVX2() || 13562 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 13563 return SDValue(); 13564 13565 SDValue ShAmtOp = N->getOperand(1); 13566 EVT EltVT = VT.getVectorElementType(); 13567 DebugLoc DL = N->getDebugLoc(); 13568 SDValue BaseShAmt = SDValue(); 13569 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 13570 unsigned NumElts = VT.getVectorNumElements(); 13571 unsigned i = 0; 13572 for (; i != NumElts; ++i) { 13573 SDValue Arg = ShAmtOp.getOperand(i); 13574 if (Arg.getOpcode() == ISD::UNDEF) continue; 13575 BaseShAmt = Arg; 13576 break; 13577 } 13578 // Handle the case where the build_vector is all undef 13579 // FIXME: Should DAG allow this? 13580 if (i == NumElts) 13581 return SDValue(); 13582 13583 for (; i != NumElts; ++i) { 13584 SDValue Arg = ShAmtOp.getOperand(i); 13585 if (Arg.getOpcode() == ISD::UNDEF) continue; 13586 if (Arg != BaseShAmt) { 13587 return SDValue(); 13588 } 13589 } 13590 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 13591 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 13592 SDValue InVec = ShAmtOp.getOperand(0); 13593 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 13594 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 13595 unsigned i = 0; 13596 for (; i != NumElts; ++i) { 13597 SDValue Arg = InVec.getOperand(i); 13598 if (Arg.getOpcode() == ISD::UNDEF) continue; 13599 BaseShAmt = Arg; 13600 break; 13601 } 13602 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 13603 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 13604 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 13605 if (C->getZExtValue() == SplatIdx) 13606 BaseShAmt = InVec.getOperand(1); 13607 } 13608 } 13609 if (BaseShAmt.getNode() == 0) 13610 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 13611 DAG.getIntPtrConstant(0)); 13612 } else 13613 return SDValue(); 13614 13615 // The shift amount is an i32. 13616 if (EltVT.bitsGT(MVT::i32)) 13617 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 13618 else if (EltVT.bitsLT(MVT::i32)) 13619 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 13620 13621 // The shift amount is identical so we can do a vector shift. 
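  // For example, a uniform (shl v4i32 X, <5,5,5,5>) is emitted through
  // getTargetVShiftNode as X86ISD::VSHLI with immediate 5, i.e. a single
  // pslld.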
13622 SDValue ValOp = N->getOperand(0); 13623 switch (N->getOpcode()) { 13624 default: 13625 llvm_unreachable("Unknown shift opcode!"); 13626 case ISD::SHL: 13627 switch (VT.getSimpleVT().SimpleTy) { 13628 default: return SDValue(); 13629 case MVT::v2i64: 13630 case MVT::v4i32: 13631 case MVT::v8i16: 13632 case MVT::v4i64: 13633 case MVT::v8i32: 13634 case MVT::v16i16: 13635 return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); 13636 } 13637 case ISD::SRA: 13638 switch (VT.getSimpleVT().SimpleTy) { 13639 default: return SDValue(); 13640 case MVT::v4i32: 13641 case MVT::v8i16: 13642 case MVT::v8i32: 13643 case MVT::v16i16: 13644 return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); 13645 } 13646 case ISD::SRL: 13647 switch (VT.getSimpleVT().SimpleTy) { 13648 default: return SDValue(); 13649 case MVT::v2i64: 13650 case MVT::v4i32: 13651 case MVT::v8i16: 13652 case MVT::v4i64: 13653 case MVT::v8i32: 13654 case MVT::v16i16: 13655 return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); 13656 } 13657 } 13658} 13659 13660 13661// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 13662// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 13663// and friends. Likewise for OR -> CMPNEQSS. 13664static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 13665 TargetLowering::DAGCombinerInfo &DCI, 13666 const X86Subtarget *Subtarget) { 13667 unsigned opcode; 13668 13669 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 13670 // we're requiring SSE2 for both. 13671 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 13672 SDValue N0 = N->getOperand(0); 13673 SDValue N1 = N->getOperand(1); 13674 SDValue CMP0 = N0->getOperand(1); 13675 SDValue CMP1 = N1->getOperand(1); 13676 DebugLoc DL = N->getDebugLoc(); 13677 13678 // The SETCCs should both refer to the same CMP. 13679 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 13680 return SDValue(); 13681 13682 SDValue CMP00 = CMP0->getOperand(0); 13683 SDValue CMP01 = CMP0->getOperand(1); 13684 EVT VT = CMP00.getValueType(); 13685 13686 if (VT == MVT::f32 || VT == MVT::f64) { 13687 bool ExpectingFlags = false; 13688 // Check for any users that want flags: 13689 for (SDNode::use_iterator UI = N->use_begin(), 13690 UE = N->use_end(); 13691 !ExpectingFlags && UI != UE; ++UI) 13692 switch (UI->getOpcode()) { 13693 default: 13694 case ISD::BR_CC: 13695 case ISD::BRCOND: 13696 case ISD::SELECT: 13697 ExpectingFlags = true; 13698 break; 13699 case ISD::CopyToReg: 13700 case ISD::SIGN_EXTEND: 13701 case ISD::ZERO_EXTEND: 13702 case ISD::ANY_EXTEND: 13703 break; 13704 } 13705 13706 if (!ExpectingFlags) { 13707 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 13708 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 13709 13710 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 13711 X86::CondCode tmp = cc0; 13712 cc0 = cc1; 13713 cc1 = tmp; 13714 } 13715 13716 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 13717 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 13718 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 13719 X86ISD::NodeType NTOperator = is64BitFP ? 13720 X86ISD::FSETCCsd : X86ISD::FSETCCss; 13721 // FIXME: need symbolic constants for these magic numbers. 13722 // See X86ATTInstPrinter.cpp:printSSECC(). 13723 unsigned x86cc = (cc0 == X86::COND_E) ? 
0 : 4; 13724 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 13725 DAG.getConstant(x86cc, MVT::i8)); 13726 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 13727 OnesOrZeroesF); 13728 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 13729 DAG.getConstant(1, MVT::i32)); 13730 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 13731 return OneBitOfTruth; 13732 } 13733 } 13734 } 13735 } 13736 return SDValue(); 13737} 13738 13739/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 13740/// so it can be folded inside ANDNP. 13741static bool CanFoldXORWithAllOnes(const SDNode *N) { 13742 EVT VT = N->getValueType(0); 13743 13744 // Match direct AllOnes for 128 and 256-bit vectors 13745 if (ISD::isBuildVectorAllOnes(N)) 13746 return true; 13747 13748 // Look through a bit convert. 13749 if (N->getOpcode() == ISD::BITCAST) 13750 N = N->getOperand(0).getNode(); 13751 13752 // Sometimes the operand may come from a insert_subvector building a 256-bit 13753 // allones vector 13754 if (VT.getSizeInBits() == 256 && 13755 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 13756 SDValue V1 = N->getOperand(0); 13757 SDValue V2 = N->getOperand(1); 13758 13759 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 13760 V1.getOperand(0).getOpcode() == ISD::UNDEF && 13761 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 13762 ISD::isBuildVectorAllOnes(V2.getNode())) 13763 return true; 13764 } 13765 13766 return false; 13767} 13768 13769static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 13770 TargetLowering::DAGCombinerInfo &DCI, 13771 const X86Subtarget *Subtarget) { 13772 if (DCI.isBeforeLegalizeOps()) 13773 return SDValue(); 13774 13775 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 13776 if (R.getNode()) 13777 return R; 13778 13779 EVT VT = N->getValueType(0); 13780 13781 // Create ANDN, BLSI, and BLSR instructions 13782 // BLSI is X & (-X) 13783 // BLSR is X & (X-1) 13784 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 13785 SDValue N0 = N->getOperand(0); 13786 SDValue N1 = N->getOperand(1); 13787 DebugLoc DL = N->getDebugLoc(); 13788 13789 // Check LHS for not 13790 if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1))) 13791 return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1); 13792 // Check RHS for not 13793 if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1))) 13794 return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0); 13795 13796 // Check LHS for neg 13797 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 13798 isZero(N0.getOperand(0))) 13799 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 13800 13801 // Check RHS for neg 13802 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 13803 isZero(N1.getOperand(0))) 13804 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 13805 13806 // Check LHS for X-1 13807 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 13808 isAllOnes(N0.getOperand(1))) 13809 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 13810 13811 // Check RHS for X-1 13812 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 13813 isAllOnes(N1.getOperand(1))) 13814 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 13815 13816 return SDValue(); 13817 } 13818 13819 // Want to form ANDNP nodes: 13820 // 1) In the hopes of then easily combining them with OR and AND nodes 13821 // to form PBLEND/PSIGN. 
13822 // 2) To match ANDN packed intrinsics 13823 if (VT != MVT::v2i64 && VT != MVT::v4i64) 13824 return SDValue(); 13825 13826 SDValue N0 = N->getOperand(0); 13827 SDValue N1 = N->getOperand(1); 13828 DebugLoc DL = N->getDebugLoc(); 13829 13830 // Check LHS for vnot 13831 if (N0.getOpcode() == ISD::XOR && 13832 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 13833 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 13834 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 13835 13836 // Check RHS for vnot 13837 if (N1.getOpcode() == ISD::XOR && 13838 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 13839 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 13840 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 13841 13842 return SDValue(); 13843} 13844 13845static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 13846 TargetLowering::DAGCombinerInfo &DCI, 13847 const X86Subtarget *Subtarget) { 13848 if (DCI.isBeforeLegalizeOps()) 13849 return SDValue(); 13850 13851 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 13852 if (R.getNode()) 13853 return R; 13854 13855 EVT VT = N->getValueType(0); 13856 13857 SDValue N0 = N->getOperand(0); 13858 SDValue N1 = N->getOperand(1); 13859 13860 // look for psign/blend 13861 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 13862 if (!Subtarget->hasSSSE3() || 13863 (VT == MVT::v4i64 && !Subtarget->hasAVX2())) 13864 return SDValue(); 13865 13866 // Canonicalize pandn to RHS 13867 if (N0.getOpcode() == X86ISD::ANDNP) 13868 std::swap(N0, N1); 13869 // or (and (m, y), (pandn m, x)) 13870 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 13871 SDValue Mask = N1.getOperand(0); 13872 SDValue X = N1.getOperand(1); 13873 SDValue Y; 13874 if (N0.getOperand(0) == Mask) 13875 Y = N0.getOperand(1); 13876 if (N0.getOperand(1) == Mask) 13877 Y = N0.getOperand(0); 13878 13879 // Check to see if the mask appeared in both the AND and ANDNP and 13880 if (!Y.getNode()) 13881 return SDValue(); 13882 13883 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 13884 if (Mask.getOpcode() != ISD::BITCAST || 13885 X.getOpcode() != ISD::BITCAST || 13886 Y.getOpcode() != ISD::BITCAST) 13887 return SDValue(); 13888 13889 // Look through mask bitcast. 13890 Mask = Mask.getOperand(0); 13891 EVT MaskVT = Mask.getValueType(); 13892 13893 // Validate that the Mask operand is a vector sra node. 13894 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 13895 // there is no psrai.b 13896 if (Mask.getOpcode() != X86ISD::VSRAI) 13897 return SDValue(); 13898 13899 // Check that the SRA is all signbits. 13900 SDValue SraC = Mask.getOperand(1); 13901 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 13902 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 13903 if ((SraAmt + 1) != EltBits) 13904 return SDValue(); 13905 13906 DebugLoc DL = N->getDebugLoc(); 13907 13908 // Now we know we at least have a plendvb with the mask val. See if 13909 // we can form a psignb/w/d. 
13910 // psign = x.type == y.type == mask.type && y = sub(0, x); 13911 X = X.getOperand(0); 13912 Y = Y.getOperand(0); 13913 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 13914 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 13915 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 13916 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 13917 "Unsupported VT for PSIGN"); 13918 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 13919 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 13920 } 13921 // PBLENDVB only available on SSE 4.1 13922 if (!Subtarget->hasSSE41()) 13923 return SDValue(); 13924 13925 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 13926 13927 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 13928 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 13929 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 13930 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 13931 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 13932 } 13933 } 13934 13935 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 13936 return SDValue(); 13937 13938 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 13939 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 13940 std::swap(N0, N1); 13941 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 13942 return SDValue(); 13943 if (!N0.hasOneUse() || !N1.hasOneUse()) 13944 return SDValue(); 13945 13946 SDValue ShAmt0 = N0.getOperand(1); 13947 if (ShAmt0.getValueType() != MVT::i8) 13948 return SDValue(); 13949 SDValue ShAmt1 = N1.getOperand(1); 13950 if (ShAmt1.getValueType() != MVT::i8) 13951 return SDValue(); 13952 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 13953 ShAmt0 = ShAmt0.getOperand(0); 13954 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 13955 ShAmt1 = ShAmt1.getOperand(0); 13956 13957 DebugLoc DL = N->getDebugLoc(); 13958 unsigned Opc = X86ISD::SHLD; 13959 SDValue Op0 = N0.getOperand(0); 13960 SDValue Op1 = N1.getOperand(0); 13961 if (ShAmt0.getOpcode() == ISD::SUB) { 13962 Opc = X86ISD::SHRD; 13963 std::swap(Op0, Op1); 13964 std::swap(ShAmt0, ShAmt1); 13965 } 13966 13967 unsigned Bits = VT.getSizeInBits(); 13968 if (ShAmt1.getOpcode() == ISD::SUB) { 13969 SDValue Sum = ShAmt1.getOperand(0); 13970 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 13971 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 13972 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 13973 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 13974 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 13975 return DAG.getNode(Opc, DL, VT, 13976 Op0, Op1, 13977 DAG.getNode(ISD::TRUNCATE, DL, 13978 MVT::i8, ShAmt0)); 13979 } 13980 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 13981 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 13982 if (ShAmt0C && 13983 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 13984 return DAG.getNode(Opc, DL, VT, 13985 N0.getOperand(0), N1.getOperand(0), 13986 DAG.getNode(ISD::TRUNCATE, DL, 13987 MVT::i8, ShAmt0)); 13988 } 13989 13990 return SDValue(); 13991} 13992 13993// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 13994static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 13995 TargetLowering::DAGCombinerInfo &DCI, 13996 const X86Subtarget *Subtarget) { 13997 if (DCI.isBeforeLegalizeOps()) 13998 return SDValue(); 13999 14000 EVT VT = N->getValueType(0); 14001 14002 if (VT != MVT::i32 && VT != MVT::i64) 14003 return SDValue(); 14004 14005 assert(Subtarget->hasBMI() && "Creating 
BLSMSK requires BMI instructions"); 14006 14007 // Create BLSMSK instructions by finding X ^ (X-1) 14008 SDValue N0 = N->getOperand(0); 14009 SDValue N1 = N->getOperand(1); 14010 DebugLoc DL = N->getDebugLoc(); 14011 14012 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 14013 isAllOnes(N0.getOperand(1))) 14014 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 14015 14016 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 14017 isAllOnes(N1.getOperand(1))) 14018 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 14019 14020 return SDValue(); 14021} 14022 14023/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 14024static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 14025 const X86Subtarget *Subtarget) { 14026 LoadSDNode *Ld = cast<LoadSDNode>(N); 14027 EVT RegVT = Ld->getValueType(0); 14028 EVT MemVT = Ld->getMemoryVT(); 14029 DebugLoc dl = Ld->getDebugLoc(); 14030 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14031 14032 ISD::LoadExtType Ext = Ld->getExtensionType(); 14033 14034 // If this is a vector EXT Load then attempt to optimize it using a 14035 // shuffle. We need SSE4 for the shuffles. 14036 // TODO: It is possible to support ZExt by zeroing the undef values 14037 // during the shuffle phase or after the shuffle. 14038 if (RegVT.isVector() && RegVT.isInteger() && 14039 Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) { 14040 assert(MemVT != RegVT && "Cannot extend to the same type"); 14041 assert(MemVT.isVector() && "Must load a vector from memory"); 14042 14043 unsigned NumElems = RegVT.getVectorNumElements(); 14044 unsigned RegSz = RegVT.getSizeInBits(); 14045 unsigned MemSz = MemVT.getSizeInBits(); 14046 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 14047 // All sizes must be a power of two 14048 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) return SDValue(); 14049 14050 // Attempt to load the original value using a single load op. 14051 // Find a scalar type which is equal to the loaded word size. 14052 MVT SclrLoadTy = MVT::i8; 14053 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 14054 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 14055 MVT Tp = (MVT::SimpleValueType)tp; 14056 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() == MemSz) { 14057 SclrLoadTy = Tp; 14058 break; 14059 } 14060 } 14061 14062 // Proceed if a load word is found. 14063 if (SclrLoadTy.getSizeInBits() != MemSz) return SDValue(); 14064 14065 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 14066 RegSz/SclrLoadTy.getSizeInBits()); 14067 14068 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 14069 RegSz/MemVT.getScalarType().getSizeInBits()); 14070 // Can't shuffle using an illegal type. 14071 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 14072 14073 // Perform a single load. 14074 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 14075 Ld->getBasePtr(), 14076 Ld->getPointerInfo(), Ld->isVolatile(), 14077 Ld->isNonTemporal(), Ld->isInvariant(), 14078 Ld->getAlignment()); 14079 14080 // Insert the word loaded into a vector. 14081 SDValue ScalarInVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 14082 LoadUnitVecVT, ScalarLoad); 14083 14084 // Bitcast the loaded value to a vector of the original element type, in 14085 // the size of the target vector type. 14086 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, 14087 ScalarInVector); 14088 unsigned SizeRatio = RegSz/MemSz; 14089 14090 // Redistribute the loaded elements into the different locations. 
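// For example, an extending load from v4i8 memory into a v4i32 register has
// SizeRatio = 4: the four loaded bytes start at positions 0-3 of the v16i8
// wide vector, and the shuffle below moves byte i to position i*4 so that the
// final bitcast leaves each loaded byte in the low byte of its i32 lane.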
14091 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 14092 for (unsigned i = 0; i < NumElems; i++) ShuffleVec[i*SizeRatio] = i; 14093 14094 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 14095 DAG.getUNDEF(SlicedVec.getValueType()), 14096 ShuffleVec.data()); 14097 14098 // Bitcast to the requested type. 14099 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 14100 // Replace the original load with the new sequence 14101 // and return the new chain. 14102 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Shuff); 14103 return SDValue(ScalarLoad.getNode(), 1); 14104 } 14105 14106 return SDValue(); 14107} 14108 14109/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 14110static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 14111 const X86Subtarget *Subtarget) { 14112 StoreSDNode *St = cast<StoreSDNode>(N); 14113 EVT VT = St->getValue().getValueType(); 14114 EVT StVT = St->getMemoryVT(); 14115 DebugLoc dl = St->getDebugLoc(); 14116 SDValue StoredVal = St->getOperand(1); 14117 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14118 14119 // If we are saving a concatenation of two XMM registers, perform two stores. 14120 // This is better in Sandy Bridge cause one 256-bit mem op is done via two 14121 // 128-bit ones. If in the future the cost becomes only one memory access the 14122 // first version would be better. 14123 if (VT.getSizeInBits() == 256 && 14124 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 14125 StoredVal.getNumOperands() == 2) { 14126 14127 SDValue Value0 = StoredVal.getOperand(0); 14128 SDValue Value1 = StoredVal.getOperand(1); 14129 14130 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 14131 SDValue Ptr0 = St->getBasePtr(); 14132 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 14133 14134 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 14135 St->getPointerInfo(), St->isVolatile(), 14136 St->isNonTemporal(), St->getAlignment()); 14137 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 14138 St->getPointerInfo(), St->isVolatile(), 14139 St->isNonTemporal(), St->getAlignment()); 14140 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 14141 } 14142 14143 // Optimize trunc store (of multiple scalars) to shuffle and store. 14144 // First, pack all of the elements in one place. Next, store to memory 14145 // in fewer chunks. 14146 if (St->isTruncatingStore() && VT.isVector()) { 14147 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14148 unsigned NumElems = VT.getVectorNumElements(); 14149 assert(StVT != VT && "Cannot truncate to the same type"); 14150 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 14151 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 14152 14153 // From, To sizes and ElemCount must be pow of two 14154 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 14155 // We are going to use the original vector elt for storing. 14156 // Accumulated smaller vector elements must be a multiple of the store size. 
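// For example, a truncating store of v8i32 to v8i16 has NumElems = 8,
// FromSz = 32 and ToSz = 16, giving SizeRatio = 2; 8*32 is a multiple of 16,
// so the check below passes.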
14157 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 14158 14159 unsigned SizeRatio = FromSz / ToSz; 14160 14161 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 14162 14163 // Create a type on which we perform the shuffle 14164 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 14165 StVT.getScalarType(), NumElems*SizeRatio); 14166 14167 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 14168 14169 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 14170 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 14171 for (unsigned i = 0; i < NumElems; i++ ) ShuffleVec[i] = i * SizeRatio; 14172 14173 // Can't shuffle using an illegal type 14174 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 14175 14176 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 14177 DAG.getUNDEF(WideVec.getValueType()), 14178 ShuffleVec.data()); 14179 // At this point all of the data is stored at the bottom of the 14180 // register. We now need to save it to mem. 14181 14182 // Find the largest store unit 14183 MVT StoreType = MVT::i8; 14184 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 14185 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 14186 MVT Tp = (MVT::SimpleValueType)tp; 14187 if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz) 14188 StoreType = Tp; 14189 } 14190 14191 // Bitcast the original vector into a vector of store-size units 14192 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 14193 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 14194 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 14195 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 14196 SmallVector<SDValue, 8> Chains; 14197 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 14198 TLI.getPointerTy()); 14199 SDValue Ptr = St->getBasePtr(); 14200 14201 // Perform one or more big stores into memory. 14202 for (unsigned i = 0; i < (ToSz*NumElems)/StoreType.getSizeInBits() ; i++) { 14203 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 14204 StoreType, ShuffWide, 14205 DAG.getIntPtrConstant(i)); 14206 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 14207 St->getPointerInfo(), St->isVolatile(), 14208 St->isNonTemporal(), St->getAlignment()); 14209 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 14210 Chains.push_back(Ch); 14211 } 14212 14213 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 14214 Chains.size()); 14215 } 14216 14217 14218 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 14219 // the FP state in cases where an emms may be missing. 14220 // A preferable solution to the general problem is to figure out the right 14221 // places to insert EMMS. This qualifies as a quick hack. 14222 14223 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 
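// For example, on a 32-bit target with SSE2 an i64 load whose only use is an
// i64 store is rewritten as an f64 load/store pair, doing the copy with one
// 64-bit memory access in each direction instead of two i32 pairs.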
14224 if (VT.getSizeInBits() != 64) 14225 return SDValue(); 14226 14227 const Function *F = DAG.getMachineFunction().getFunction(); 14228 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); 14229 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 14230 && Subtarget->hasSSE2(); 14231 if ((VT.isVector() || 14232 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 14233 isa<LoadSDNode>(St->getValue()) && 14234 !cast<LoadSDNode>(St->getValue())->isVolatile() && 14235 St->getChain().hasOneUse() && !St->isVolatile()) { 14236 SDNode* LdVal = St->getValue().getNode(); 14237 LoadSDNode *Ld = 0; 14238 int TokenFactorIndex = -1; 14239 SmallVector<SDValue, 8> Ops; 14240 SDNode* ChainVal = St->getChain().getNode(); 14241 // Must be a store of a load. We currently handle two cases: the load 14242 // is a direct child, and it's under an intervening TokenFactor. It is 14243 // possible to dig deeper under nested TokenFactors. 14244 if (ChainVal == LdVal) 14245 Ld = cast<LoadSDNode>(St->getChain()); 14246 else if (St->getValue().hasOneUse() && 14247 ChainVal->getOpcode() == ISD::TokenFactor) { 14248 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 14249 if (ChainVal->getOperand(i).getNode() == LdVal) { 14250 TokenFactorIndex = i; 14251 Ld = cast<LoadSDNode>(St->getValue()); 14252 } else 14253 Ops.push_back(ChainVal->getOperand(i)); 14254 } 14255 } 14256 14257 if (!Ld || !ISD::isNormalLoad(Ld)) 14258 return SDValue(); 14259 14260 // If this is not the MMX case, i.e. we are just turning i64 load/store 14261 // into f64 load/store, avoid the transformation if there are multiple 14262 // uses of the loaded value. 14263 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 14264 return SDValue(); 14265 14266 DebugLoc LdDL = Ld->getDebugLoc(); 14267 DebugLoc StDL = N->getDebugLoc(); 14268 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 14269 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 14270 // pair instead. 14271 if (Subtarget->is64Bit() || F64IsLegal) { 14272 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 14273 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 14274 Ld->getPointerInfo(), Ld->isVolatile(), 14275 Ld->isNonTemporal(), Ld->isInvariant(), 14276 Ld->getAlignment()); 14277 SDValue NewChain = NewLd.getValue(1); 14278 if (TokenFactorIndex != -1) { 14279 Ops.push_back(NewChain); 14280 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 14281 Ops.size()); 14282 } 14283 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 14284 St->getPointerInfo(), 14285 St->isVolatile(), St->isNonTemporal(), 14286 St->getAlignment()); 14287 } 14288 14289 // Otherwise, lower to two pairs of 32-bit loads / stores. 
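// The low i32 half is loaded and stored at the original address and the high
// half at offset 4, with the alignment of the +4 accesses reduced via
// MinAlign.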
14290 SDValue LoAddr = Ld->getBasePtr(); 14291 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 14292 DAG.getConstant(4, MVT::i32)); 14293 14294 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 14295 Ld->getPointerInfo(), 14296 Ld->isVolatile(), Ld->isNonTemporal(), 14297 Ld->isInvariant(), Ld->getAlignment()); 14298 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 14299 Ld->getPointerInfo().getWithOffset(4), 14300 Ld->isVolatile(), Ld->isNonTemporal(), 14301 Ld->isInvariant(), 14302 MinAlign(Ld->getAlignment(), 4)); 14303 14304 SDValue NewChain = LoLd.getValue(1); 14305 if (TokenFactorIndex != -1) { 14306 Ops.push_back(LoLd); 14307 Ops.push_back(HiLd); 14308 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 14309 Ops.size()); 14310 } 14311 14312 LoAddr = St->getBasePtr(); 14313 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 14314 DAG.getConstant(4, MVT::i32)); 14315 14316 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 14317 St->getPointerInfo(), 14318 St->isVolatile(), St->isNonTemporal(), 14319 St->getAlignment()); 14320 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 14321 St->getPointerInfo().getWithOffset(4), 14322 St->isVolatile(), 14323 St->isNonTemporal(), 14324 MinAlign(St->getAlignment(), 4)); 14325 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 14326 } 14327 return SDValue(); 14328} 14329 14330/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 14331/// and return the operands for the horizontal operation in LHS and RHS. A 14332/// horizontal operation performs the binary operation on successive elements 14333/// of its first operand, then on successive elements of its second operand, 14334/// returning the resulting values in a vector. For example, if 14335/// A = < float a0, float a1, float a2, float a3 > 14336/// and 14337/// B = < float b0, float b1, float b2, float b3 > 14338/// then the result of doing a horizontal operation on A and B is 14339/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 14340/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 14341/// A horizontal-op B, for some already available A and B, and if so then LHS is 14342/// set to A, RHS to B, and the routine returns 'true'. 14343/// Note that the binary operation should have the property that if one of the 14344/// operands is UNDEF then the result is UNDEF. 14345static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 14346 // Look for the following pattern: if 14347 // A = < float a0, float a1, float a2, float a3 > 14348 // B = < float b0, float b1, float b2, float b3 > 14349 // and 14350 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 14351 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 14352 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 14353 // which is A horizontal-op B. 14354 14355 // At least one of the operands should be a vector shuffle. 14356 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 14357 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 14358 return false; 14359 14360 EVT VT = LHS.getValueType(); 14361 14362 assert((VT.is128BitVector() || VT.is256BitVector()) && 14363 "Unsupported vector type for horizontal add/sub"); 14364 14365 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 14366 // operate independently on 128-bit lanes. 
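// For example, for v8f32 the expected result of A horizontal-op B is
//   < a0 op a1, a2 op a3, b0 op b1, b2 op b3,
//     a4 op a5, a6 op a7, b4 op b5, b6 op b7 >
// i.e. the pairing pattern shown above repeats within each 128-bit lane.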
14367 unsigned NumElts = VT.getVectorNumElements(); 14368 unsigned NumLanes = VT.getSizeInBits()/128; 14369 unsigned NumLaneElts = NumElts / NumLanes; 14370 assert((NumLaneElts % 2 == 0) && 14371 "Vector type should have an even number of elements in each lane"); 14372 unsigned HalfLaneElts = NumLaneElts/2; 14373 14374 // View LHS in the form 14375 // LHS = VECTOR_SHUFFLE A, B, LMask 14376 // If LHS is not a shuffle then pretend it is the shuffle 14377 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 14378 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 14379 // type VT. 14380 SDValue A, B; 14381 SmallVector<int, 16> LMask(NumElts); 14382 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 14383 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 14384 A = LHS.getOperand(0); 14385 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 14386 B = LHS.getOperand(1); 14387 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 14388 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 14389 } else { 14390 if (LHS.getOpcode() != ISD::UNDEF) 14391 A = LHS; 14392 for (unsigned i = 0; i != NumElts; ++i) 14393 LMask[i] = i; 14394 } 14395 14396 // Likewise, view RHS in the form 14397 // RHS = VECTOR_SHUFFLE C, D, RMask 14398 SDValue C, D; 14399 SmallVector<int, 16> RMask(NumElts); 14400 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 14401 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 14402 C = RHS.getOperand(0); 14403 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 14404 D = RHS.getOperand(1); 14405 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 14406 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 14407 } else { 14408 if (RHS.getOpcode() != ISD::UNDEF) 14409 C = RHS; 14410 for (unsigned i = 0; i != NumElts; ++i) 14411 RMask[i] = i; 14412 } 14413 14414 // Check that the shuffles are both shuffling the same vectors. 14415 if (!(A == C && B == D) && !(A == D && B == C)) 14416 return false; 14417 14418 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 14419 if (!A.getNode() && !B.getNode()) 14420 return false; 14421 14422 // If A and B occur in reverse order in RHS, then "swap" them (which means 14423 // rewriting the mask). 14424 if (A != C) 14425 CommuteVectorShuffleMask(RMask, NumElts); 14426 14427 // At this point LHS and RHS are equivalent to 14428 // LHS = VECTOR_SHUFFLE A, B, LMask 14429 // RHS = VECTOR_SHUFFLE A, B, RMask 14430 // Check that the masks correspond to performing a horizontal operation. 14431 for (unsigned i = 0; i != NumElts; ++i) { 14432 int LIdx = LMask[i], RIdx = RMask[i]; 14433 14434 // Ignore any UNDEF components. 14435 if (LIdx < 0 || RIdx < 0 || 14436 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 14437 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 14438 continue; 14439 14440 // Check that successive elements are being operated on. If not, this is 14441 // not a horizontal operation. 14442 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 14443 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 14444 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 14445 if (!(LIdx == Index && RIdx == Index + 1) && 14446 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 14447 return false; 14448 } 14449 14450 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 14451 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
14452 return true; 14453} 14454 14455/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 14456static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 14457 const X86Subtarget *Subtarget) { 14458 EVT VT = N->getValueType(0); 14459 SDValue LHS = N->getOperand(0); 14460 SDValue RHS = N->getOperand(1); 14461 14462 // Try to synthesize horizontal adds from adds of shuffles. 14463 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 14464 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 14465 isHorizontalBinOp(LHS, RHS, true)) 14466 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 14467 return SDValue(); 14468} 14469 14470/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 14471static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 14472 const X86Subtarget *Subtarget) { 14473 EVT VT = N->getValueType(0); 14474 SDValue LHS = N->getOperand(0); 14475 SDValue RHS = N->getOperand(1); 14476 14477 // Try to synthesize horizontal subs from subs of shuffles. 14478 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 14479 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 14480 isHorizontalBinOp(LHS, RHS, false)) 14481 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 14482 return SDValue(); 14483} 14484 14485/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 14486/// X86ISD::FXOR nodes. 14487static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 14488 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 14489 // F[X]OR(0.0, x) -> x 14490 // F[X]OR(x, 0.0) -> x 14491 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 14492 if (C->getValueAPF().isPosZero()) 14493 return N->getOperand(1); 14494 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 14495 if (C->getValueAPF().isPosZero()) 14496 return N->getOperand(0); 14497 return SDValue(); 14498} 14499 14500/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 14501static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 14502 // FAND(0.0, x) -> 0.0 14503 // FAND(x, 0.0) -> 0.0 14504 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 14505 if (C->getValueAPF().isPosZero()) 14506 return N->getOperand(0); 14507 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 14508 if (C->getValueAPF().isPosZero()) 14509 return N->getOperand(1); 14510 return SDValue(); 14511} 14512 14513static SDValue PerformBTCombine(SDNode *N, 14514 SelectionDAG &DAG, 14515 TargetLowering::DAGCombinerInfo &DCI) { 14516 // BT ignores high bits in the bit index operand. 
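// For example, a BT on a 32-bit operand only demands the low 5 bits of the
// bit index, so an over-wide constant index can be shrunk and redundant
// masking of the index can be simplified away.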
14517 SDValue Op1 = N->getOperand(1); 14518 if (Op1.hasOneUse()) { 14519 unsigned BitWidth = Op1.getValueSizeInBits(); 14520 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 14521 APInt KnownZero, KnownOne; 14522 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 14523 !DCI.isBeforeLegalizeOps()); 14524 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14525 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 14526 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 14527 DCI.CommitTargetLoweringOpt(TLO); 14528 } 14529 return SDValue(); 14530} 14531 14532static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 14533 SDValue Op = N->getOperand(0); 14534 if (Op.getOpcode() == ISD::BITCAST) 14535 Op = Op.getOperand(0); 14536 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 14537 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 14538 VT.getVectorElementType().getSizeInBits() == 14539 OpVT.getVectorElementType().getSizeInBits()) { 14540 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 14541 } 14542 return SDValue(); 14543} 14544 14545static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 14546 const X86Subtarget *Subtarget) { 14547 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 14548 // (and (i32 x86isd::setcc_carry), 1) 14549 // This eliminates the zext. This transformation is necessary because 14550 // ISD::SETCC is always legalized to i8. 14551 DebugLoc dl = N->getDebugLoc(); 14552 SDValue N0 = N->getOperand(0); 14553 EVT VT = N->getValueType(0); 14554 EVT OpVT = N0.getValueType(); 14555 14556 if (N0.getOpcode() == ISD::AND && 14557 N0.hasOneUse() && 14558 N0.getOperand(0).hasOneUse()) { 14559 SDValue N00 = N0.getOperand(0); 14560 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 14561 return SDValue(); 14562 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 14563 if (!C || C->getZExtValue() != 1) 14564 return SDValue(); 14565 return DAG.getNode(ISD::AND, dl, VT, 14566 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 14567 N00.getOperand(0), N00.getOperand(1)), 14568 DAG.getConstant(1, VT)); 14569 } 14570 // Optimize vectors in AVX mode: 14571 // 14572 // v8i16 -> v8i32 14573 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 14574 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 14575 // Concat upper and lower parts. 14576 // 14577 // v4i32 -> v4i64 14578 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 14579 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 14580 // Concat upper and lower parts. 
14581 // 14582 if (Subtarget->hasAVX()) { 14583 14584 if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || 14585 ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { 14586 14587 SDValue ZeroVec = getZeroVector(OpVT, Subtarget->hasSSE2(), Subtarget->hasAVX2(), 14588 DAG, dl); 14589 SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, DAG); 14590 SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, DAG); 14591 14592 EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 14593 VT.getVectorNumElements()/2); 14594 14595 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 14596 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 14597 14598 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 14599 } 14600 } 14601 14602 14603 return SDValue(); 14604} 14605 14606// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 14607static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { 14608 unsigned X86CC = N->getConstantOperandVal(0); 14609 SDValue EFLAG = N->getOperand(1); 14610 DebugLoc DL = N->getDebugLoc(); 14611 14612 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 14613 // a zext and produces an all-ones bit which is more useful than 0/1 in some 14614 // cases. 14615 if (X86CC == X86::COND_B) 14616 return DAG.getNode(ISD::AND, DL, MVT::i8, 14617 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 14618 DAG.getConstant(X86CC, MVT::i8), EFLAG), 14619 DAG.getConstant(1, MVT::i8)); 14620 14621 return SDValue(); 14622} 14623 14624static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 14625 const X86TargetLowering *XTLI) { 14626 SDValue Op0 = N->getOperand(0); 14627 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 14628 // a 32-bit target where SSE doesn't support i64->FP operations. 14629 if (Op0.getOpcode() == ISD::LOAD) { 14630 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 14631 EVT VT = Ld->getValueType(0); 14632 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 14633 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 14634 !XTLI->getSubtarget()->is64Bit() && 14635 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 14636 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 14637 Ld->getChain(), Op0, DAG); 14638 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 14639 return FILDChain; 14640 } 14641 } 14642 return SDValue(); 14643} 14644 14645// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 14646static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 14647 X86TargetLowering::DAGCombinerInfo &DCI) { 14648 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 14649 // the result is either zero or one (depending on the input carry bit). 14650 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 14651 if (X86::isZeroNode(N->getOperand(0)) && 14652 X86::isZeroNode(N->getOperand(1)) && 14653 // We don't have a good way to replace an EFLAGS use, so only do this when 14654 // dead right now. 
14655 SDValue(N, 1).use_empty()) { 14656 DebugLoc DL = N->getDebugLoc(); 14657 EVT VT = N->getValueType(0); 14658 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 14659 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 14660 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 14661 DAG.getConstant(X86::COND_B,MVT::i8), 14662 N->getOperand(2)), 14663 DAG.getConstant(1, VT)); 14664 return DCI.CombineTo(N, Res1, CarryOut); 14665 } 14666 14667 return SDValue(); 14668} 14669 14670// fold (add Y, (sete X, 0)) -> adc 0, Y 14671// (add Y, (setne X, 0)) -> sbb -1, Y 14672// (sub (sete X, 0), Y) -> sbb 0, Y 14673// (sub (setne X, 0), Y) -> adc -1, Y 14674static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 14675 DebugLoc DL = N->getDebugLoc(); 14676 14677 // Look through ZExts. 14678 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 14679 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 14680 return SDValue(); 14681 14682 SDValue SetCC = Ext.getOperand(0); 14683 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 14684 return SDValue(); 14685 14686 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 14687 if (CC != X86::COND_E && CC != X86::COND_NE) 14688 return SDValue(); 14689 14690 SDValue Cmp = SetCC.getOperand(1); 14691 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 14692 !X86::isZeroNode(Cmp.getOperand(1)) || 14693 !Cmp.getOperand(0).getValueType().isInteger()) 14694 return SDValue(); 14695 14696 SDValue CmpOp0 = Cmp.getOperand(0); 14697 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 14698 DAG.getConstant(1, CmpOp0.getValueType())); 14699 14700 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 14701 if (CC == X86::COND_NE) 14702 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 14703 DL, OtherVal.getValueType(), OtherVal, 14704 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 14705 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 14706 DL, OtherVal.getValueType(), OtherVal, 14707 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 14708} 14709 14710/// PerformADDCombine - Do target-specific dag combines on integer adds. 14711static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 14712 const X86Subtarget *Subtarget) { 14713 EVT VT = N->getValueType(0); 14714 SDValue Op0 = N->getOperand(0); 14715 SDValue Op1 = N->getOperand(1); 14716 14717 // Try to synthesize horizontal adds from adds of shuffles. 14718 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 14719 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 14720 isHorizontalBinOp(Op0, Op1, true)) 14721 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 14722 14723 return OptimizeConditionalInDecrement(N, DAG); 14724} 14725 14726static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 14727 const X86Subtarget *Subtarget) { 14728 SDValue Op0 = N->getOperand(0); 14729 SDValue Op1 = N->getOperand(1); 14730 14731 // X86 can't encode an immediate LHS of a sub. See if we can push the 14732 // negation into a preceding instruction. 14733 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 14734 // If the RHS of the sub is a XOR with one use and a constant, invert the 14735 // immediate. Then add one to the LHS of the sub so we can turn 14736 // X-Y -> X+~Y+1, saving one register. 
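// For example, (sub 5, (xor X, 3)) becomes (add (xor X, ~3), 6), using the
// identity C - (X ^ K) == (X ^ ~K) + C + 1.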
14737 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 14738 isa<ConstantSDNode>(Op1.getOperand(1))) { 14739 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 14740 EVT VT = Op0.getValueType(); 14741 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 14742 Op1.getOperand(0), 14743 DAG.getConstant(~XorC, VT)); 14744 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 14745 DAG.getConstant(C->getAPIntValue()+1, VT)); 14746 } 14747 } 14748 14749 // Try to synthesize horizontal adds from adds of shuffles. 14750 EVT VT = N->getValueType(0); 14751 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 14752 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 14753 isHorizontalBinOp(Op0, Op1, true)) 14754 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 14755 14756 return OptimizeConditionalInDecrement(N, DAG); 14757} 14758 14759SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 14760 DAGCombinerInfo &DCI) const { 14761 SelectionDAG &DAG = DCI.DAG; 14762 switch (N->getOpcode()) { 14763 default: break; 14764 case ISD::EXTRACT_VECTOR_ELT: 14765 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this); 14766 case ISD::VSELECT: 14767 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); 14768 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI); 14769 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 14770 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 14771 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 14772 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 14773 case ISD::SHL: 14774 case ISD::SRA: 14775 case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget); 14776 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 14777 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 14778 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 14779 case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget); 14780 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 14781 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 14782 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 14783 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 14784 case X86ISD::FXOR: 14785 case X86ISD::FOR: return PerformFORCombine(N, DAG); 14786 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 14787 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 14788 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 14789 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, Subtarget); 14790 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG); 14791 case X86ISD::SHUFP: // Handle all target specific shuffles 14792 case X86ISD::PALIGN: 14793 case X86ISD::UNPCKH: 14794 case X86ISD::UNPCKL: 14795 case X86ISD::MOVHLPS: 14796 case X86ISD::MOVLHPS: 14797 case X86ISD::PSHUFD: 14798 case X86ISD::PSHUFHW: 14799 case X86ISD::PSHUFLW: 14800 case X86ISD::MOVSS: 14801 case X86ISD::MOVSD: 14802 case X86ISD::VPERMILP: 14803 case X86ISD::VPERM2X128: 14804 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget); 14805 } 14806 14807 return SDValue(); 14808} 14809 14810/// isTypeDesirableForOp - Return true if the target has native support for 14811/// the specified value type and it is 'desirable' to use the type for the 14812/// given node type. e.g. 
On x86 i16 is legal, but undesirable since i16 14813/// instruction encodings are longer and some i16 instructions are slow. 14814bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { 14815 if (!isTypeLegal(VT)) 14816 return false; 14817 if (VT != MVT::i16) 14818 return true; 14819 14820 switch (Opc) { 14821 default: 14822 return true; 14823 case ISD::LOAD: 14824 case ISD::SIGN_EXTEND: 14825 case ISD::ZERO_EXTEND: 14826 case ISD::ANY_EXTEND: 14827 case ISD::SHL: 14828 case ISD::SRL: 14829 case ISD::SUB: 14830 case ISD::ADD: 14831 case ISD::MUL: 14832 case ISD::AND: 14833 case ISD::OR: 14834 case ISD::XOR: 14835 return false; 14836 } 14837} 14838 14839/// IsDesirableToPromoteOp - This method query the target whether it is 14840/// beneficial for dag combiner to promote the specified node. If true, it 14841/// should return the desired promotion type by reference. 14842bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { 14843 EVT VT = Op.getValueType(); 14844 if (VT != MVT::i16) 14845 return false; 14846 14847 bool Promote = false; 14848 bool Commute = false; 14849 switch (Op.getOpcode()) { 14850 default: break; 14851 case ISD::LOAD: { 14852 LoadSDNode *LD = cast<LoadSDNode>(Op); 14853 // If the non-extending load has a single use and it's not live out, then it 14854 // might be folded. 14855 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&& 14856 Op.hasOneUse()*/) { 14857 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 14858 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 14859 // The only case where we'd want to promote LOAD (rather then it being 14860 // promoted as an operand is when it's only use is liveout. 14861 if (UI->getOpcode() != ISD::CopyToReg) 14862 return false; 14863 } 14864 } 14865 Promote = true; 14866 break; 14867 } 14868 case ISD::SIGN_EXTEND: 14869 case ISD::ZERO_EXTEND: 14870 case ISD::ANY_EXTEND: 14871 Promote = true; 14872 break; 14873 case ISD::SHL: 14874 case ISD::SRL: { 14875 SDValue N0 = Op.getOperand(0); 14876 // Look out for (store (shl (load), x)). 14877 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 14878 return false; 14879 Promote = true; 14880 break; 14881 } 14882 case ISD::ADD: 14883 case ISD::MUL: 14884 case ISD::AND: 14885 case ISD::OR: 14886 case ISD::XOR: 14887 Commute = true; 14888 // fallthrough 14889 case ISD::SUB: { 14890 SDValue N0 = Op.getOperand(0); 14891 SDValue N1 = Op.getOperand(1); 14892 if (!Commute && MayFoldLoad(N1)) 14893 return false; 14894 // Avoid disabling potential load folding opportunities. 14895 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 14896 return false; 14897 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 14898 return false; 14899 Promote = true; 14900 } 14901 } 14902 14903 PVT = MVT::i32; 14904 return Promote; 14905} 14906 14907//===----------------------------------------------------------------------===// 14908// X86 Inline Assembly Support 14909//===----------------------------------------------------------------------===// 14910 14911namespace { 14912 // Helper to match a string separated by whitespace. 14913 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 14914 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 14915 14916 for (unsigned i = 0, e = args.size(); i != e; ++i) { 14917 StringRef piece(*args[i]); 14918 if (!s.startswith(piece)) // Check if the piece matches. 
14919 return false; 14920 14921 s = s.substr(piece.size()); 14922 StringRef::size_type pos = s.find_first_not_of(" \t"); 14923 if (pos == 0) // We matched a prefix. 14924 return false; 14925 14926 s = s.substr(pos); 14927 } 14928 14929 return s.empty(); 14930 } 14931 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 14932} 14933 14934bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 14935 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 14936 14937 std::string AsmStr = IA->getAsmString(); 14938 14939 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 14940 if (!Ty || Ty->getBitWidth() % 16 != 0) 14941 return false; 14942 14943 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 14944 SmallVector<StringRef, 4> AsmPieces; 14945 SplitString(AsmStr, AsmPieces, ";\n"); 14946 14947 switch (AsmPieces.size()) { 14948 default: return false; 14949 case 1: 14950 // FIXME: this should verify that we are targeting a 486 or better. If not, 14951 // we will turn this bswap into something that will be lowered to logical 14952 // ops instead of emitting the bswap asm. For now, we don't support 486 or 14953 // lower so don't worry about this. 14954 // bswap $0 14955 if (matchAsm(AsmPieces[0], "bswap", "$0") || 14956 matchAsm(AsmPieces[0], "bswapl", "$0") || 14957 matchAsm(AsmPieces[0], "bswapq", "$0") || 14958 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 14959 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 14960 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 14961 // No need to check constraints, nothing other than the equivalent of 14962 // "=r,0" would be valid here. 14963 return IntrinsicLowering::LowerToByteSwap(CI); 14964 } 14965 14966 // rorw $$8, ${0:w} --> llvm.bswap.i16 14967 if (CI->getType()->isIntegerTy(16) && 14968 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 14969 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 14970 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 14971 AsmPieces.clear(); 14972 const std::string &ConstraintsStr = IA->getConstraintString(); 14973 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 14974 std::sort(AsmPieces.begin(), AsmPieces.end()); 14975 if (AsmPieces.size() == 4 && 14976 AsmPieces[0] == "~{cc}" && 14977 AsmPieces[1] == "~{dirflag}" && 14978 AsmPieces[2] == "~{flags}" && 14979 AsmPieces[3] == "~{fpsr}") 14980 return IntrinsicLowering::LowerToByteSwap(CI); 14981 } 14982 break; 14983 case 3: 14984 if (CI->getType()->isIntegerTy(32) && 14985 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 14986 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 14987 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 14988 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 14989 AsmPieces.clear(); 14990 const std::string &ConstraintsStr = IA->getConstraintString(); 14991 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 14992 std::sort(AsmPieces.begin(), AsmPieces.end()); 14993 if (AsmPieces.size() == 4 && 14994 AsmPieces[0] == "~{cc}" && 14995 AsmPieces[1] == "~{dirflag}" && 14996 AsmPieces[2] == "~{flags}" && 14997 AsmPieces[3] == "~{fpsr}") 14998 return IntrinsicLowering::LowerToByteSwap(CI); 14999 } 15000 15001 if (CI->getType()->isIntegerTy(64)) { 15002 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 15003 if (Constraints.size() >= 2 && 15004 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 15005 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 15006 // bswap %eax / bswap %edx 
/ xchgl %eax, %edx -> llvm.bswap.i64 15007 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 15008 matchAsm(AsmPieces[1], "bswap", "%edx") && 15009 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 15010 return IntrinsicLowering::LowerToByteSwap(CI); 15011 } 15012 } 15013 break; 15014 } 15015 return false; 15016} 15017 15018 15019 15020/// getConstraintType - Given a constraint letter, return the type of 15021/// constraint it is for this target. 15022X86TargetLowering::ConstraintType 15023X86TargetLowering::getConstraintType(const std::string &Constraint) const { 15024 if (Constraint.size() == 1) { 15025 switch (Constraint[0]) { 15026 case 'R': 15027 case 'q': 15028 case 'Q': 15029 case 'f': 15030 case 't': 15031 case 'u': 15032 case 'y': 15033 case 'x': 15034 case 'Y': 15035 case 'l': 15036 return C_RegisterClass; 15037 case 'a': 15038 case 'b': 15039 case 'c': 15040 case 'd': 15041 case 'S': 15042 case 'D': 15043 case 'A': 15044 return C_Register; 15045 case 'I': 15046 case 'J': 15047 case 'K': 15048 case 'L': 15049 case 'M': 15050 case 'N': 15051 case 'G': 15052 case 'C': 15053 case 'e': 15054 case 'Z': 15055 return C_Other; 15056 default: 15057 break; 15058 } 15059 } 15060 return TargetLowering::getConstraintType(Constraint); 15061} 15062 15063/// Examine constraint type and operand type and determine a weight value. 15064/// This object must already have been set up with the operand type 15065/// and the current alternative constraint selected. 15066TargetLowering::ConstraintWeight 15067 X86TargetLowering::getSingleConstraintMatchWeight( 15068 AsmOperandInfo &info, const char *constraint) const { 15069 ConstraintWeight weight = CW_Invalid; 15070 Value *CallOperandVal = info.CallOperandVal; 15071 // If we don't have a value, we can't do a match, 15072 // but allow it at the lowest weight. 15073 if (CallOperandVal == NULL) 15074 return CW_Default; 15075 Type *type = CallOperandVal->getType(); 15076 // Look at the constraint type. 
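// For example, constraint 'I' only accepts immediates in the range [0, 31]
// (32-bit shift counts), so only such constants receive CW_Constant below.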
15077 switch (*constraint) { 15078 default: 15079 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 15080 case 'R': 15081 case 'q': 15082 case 'Q': 15083 case 'a': 15084 case 'b': 15085 case 'c': 15086 case 'd': 15087 case 'S': 15088 case 'D': 15089 case 'A': 15090 if (CallOperandVal->getType()->isIntegerTy()) 15091 weight = CW_SpecificReg; 15092 break; 15093 case 'f': 15094 case 't': 15095 case 'u': 15096 if (type->isFloatingPointTy()) 15097 weight = CW_SpecificReg; 15098 break; 15099 case 'y': 15100 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 15101 weight = CW_SpecificReg; 15102 break; 15103 case 'x': 15104 case 'Y': 15105 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 15106 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX())) 15107 weight = CW_Register; 15108 break; 15109 case 'I': 15110 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 15111 if (C->getZExtValue() <= 31) 15112 weight = CW_Constant; 15113 } 15114 break; 15115 case 'J': 15116 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15117 if (C->getZExtValue() <= 63) 15118 weight = CW_Constant; 15119 } 15120 break; 15121 case 'K': 15122 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15123 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 15124 weight = CW_Constant; 15125 } 15126 break; 15127 case 'L': 15128 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15129 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 15130 weight = CW_Constant; 15131 } 15132 break; 15133 case 'M': 15134 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15135 if (C->getZExtValue() <= 3) 15136 weight = CW_Constant; 15137 } 15138 break; 15139 case 'N': 15140 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15141 if (C->getZExtValue() <= 0xff) 15142 weight = CW_Constant; 15143 } 15144 break; 15145 case 'G': 15146 case 'C': 15147 if (dyn_cast<ConstantFP>(CallOperandVal)) { 15148 weight = CW_Constant; 15149 } 15150 break; 15151 case 'e': 15152 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15153 if ((C->getSExtValue() >= -0x80000000LL) && 15154 (C->getSExtValue() <= 0x7fffffffLL)) 15155 weight = CW_Constant; 15156 } 15157 break; 15158 case 'Z': 15159 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15160 if (C->getZExtValue() <= 0xffffffff) 15161 weight = CW_Constant; 15162 } 15163 break; 15164 } 15165 return weight; 15166} 15167 15168/// LowerXConstraint - try to replace an X constraint, which matches anything, 15169/// with another that has more specific requirements based on the type of the 15170/// corresponding operand. 15171const char *X86TargetLowering:: 15172LowerXConstraint(EVT ConstraintVT) const { 15173 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 15174 // 'f' like normal targets. 15175 if (ConstraintVT.isFloatingPoint()) { 15176 if (Subtarget->hasSSE2()) 15177 return "Y"; 15178 if (Subtarget->hasSSE1()) 15179 return "x"; 15180 } 15181 15182 return TargetLowering::LowerXConstraint(ConstraintVT); 15183} 15184 15185/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 15186/// vector. If it is invalid, don't add anything to Ops. 15187void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 15188 std::string &Constraint, 15189 std::vector<SDValue>&Ops, 15190 SelectionDAG &DAG) const { 15191 SDValue Result(0, 0); 15192 15193 // Only support length 1 constraints for now. 
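// For example, with constraint 'I' a constant operand of 7 is emitted as a
// target constant below, while 42 is out of range and nothing is added to
// Ops.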
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
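      // (For instance a GlobalAddress scaled by a multiply, or an ADD/SUB
      // whose right-hand operand is not a constant.)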
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, X86::GR32RegisterClass);
        else if (VT == MVT::i16)
          return std::make_pair(0U, X86::GR16RegisterClass);
        else if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, X86::GR8RegisterClass);
        else if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, X86::GR64RegisterClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, X86::GR32_ABCDRegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_ABCDRegisterClass);
      else if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, X86::GR64_ABCDRegisterClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
      return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, X86::VR256RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map st(0) .. st(7) to the corresponding ST0 .. ST7 register.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::ST0 + Constraint[4] - '0';
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = X86::CCRRegisterClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GR32_ADRegisterClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
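  // For example, "{ax}" paired with an i32 operand is rewritten below to EAX
  // in GR32 (and with an i64 operand to RAX in GR64), and "{xmm0}" with an
  // f32 operand is narrowed to FR32.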
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}