//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <cctype>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.is256BitVector() && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
  // we can match to VEXTRACTF128.
  unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
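  // Worked example of the normalization below (illustrative comment, not
  // from the original source): extracting element 5 from a v8i32 gives
  // ElemsPerChunk = 128/32 = 4 and
  // NormalizedIdxVal = ((5 * 32) / 128) * 4 = 4, i.e. the first element
  // of the upper 128-bit half.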
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
  SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                               VecIdx);

  return Result;
}

/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  DebugLoc dl) {
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;

  EVT VT = Vec.getValueType();
  assert(VT.is128BitVector() && "Unexpected vector size!");

  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant 128 bits.
  unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                     VecIdx);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions.  This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   DebugLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X86_64MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetLinux())
    return new X86LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glue are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them up to i32, which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
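  // Illustrative note (added, not from the original source): at the IR level
  // this means a sequence such as
  //   fence seq_cst; atomicrmw add i32* %p, i32 1 seq_cst; fence seq_cst
  // can drop the surrounding fences, since the locked instruction already
  // provides the required ordering on x86.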
  setShouldFoldAtomicFences(true);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else if (TM.Options.EnableSegmentedStacks)
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Expand);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
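  // Clarifying note (added, not in the original source): "long double" here
  // is the f80 x87 extended-precision type, for which SSE provides no
  // registers or instructions.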
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set the operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
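  // Example of this pattern (added note, not from the original source):
  // everything defaults to Expand in the loop below, and specific operations
  // such as FADD on v4f32 are re-enabled as Legal later, once the subtarget
  // is known to have SSE1.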
  for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
       VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand);
    for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
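    // Note (added for clarity, not in the original source): the loop below
    // walks the MVT enum from v16i8 up to, but not including, v2i64, and
    // filters down to the 128-bit power-of-two vector types; v2i64 and
    // v2f64 get their own explicit entries after the loop.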
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant.  For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::SRL, MVT::v2i64, Legal);
      setOperationAction(ISD::SRL, MVT::v4i32, Legal);

      setOperationAction(ISD::SHL, MVT::v2i64, Legal);
      setOperationAction(ISD::SHL, MVT::v4i32, Legal);

      setOperationAction(ISD::SRA, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::SRL, MVT::v2i64, Custom);
      setOperationAction(ISD::SRL, MVT::v4i32, Custom);

      setOperationAction(ISD::SHL, MVT::v2i64, Custom);
      setOperationAction(ISD::SHL, MVT::v4i32, Custom);

      setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);

    if (Subtarget->hasFMA()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Custom);
      setOperationAction(ISD::FMA, MVT::v4f64, Custom);
      setOperationAction(ISD::FMA, MVT::v4f32, Custom);
      setOperationAction(ISD::FMA, MVT::v2f64, Custom);
      setOperationAction(ISD::FMA, MVT::f32, Custom);
      setOperationAction(ISD::FMA, MVT::f64, Custom);
    }

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul

      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      setOperationAction(ISD::SRL, MVT::v4i64, Legal);
      setOperationAction(ISD::SRL, MVT::v8i32, Legal);

      setOperationAction(ISD::SHL, MVT::v4i64, Legal);
      setOperationAction(ISD::SHL, MVT::v8i32, Legal);

      setOperationAction(ISD::SRA, MVT::v8i32, Legal);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul

      setOperationAction(ISD::SRL, MVT::v4i64, Custom);
      setOperationAction(ISD::SRL, MVT::v8i32, Custom);

      setOperationAction(ISD::SHL, MVT::v4i64, Custom);
      setOperationAction(ISD::SHL, MVT::v8i32, Custom);

      setOperationAction(ISD::SRA, MVT::v8i32, Custom);
    }

    // Custom lower several nodes for 256-bit types.
    for (int i = MVT::FIRST_VECTOR_VALUETYPE;
         i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i64);
    }
  }

  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
       VT != MVT::LAST_VECTOR_VALUETYPE; VT++) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
                       Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much
  // better than generic legalization for 64-bit multiplication-with-overflow,
  // though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions.
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::FP_TO_SINT);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance, so
  // do not reduce the limit.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.
  benefitFromCodePlacementOpt = true;

  // A predictable cmov does not hurt on Atom because it's in-order.
  predictableSelectIsExpensive = !Subtarget->isAtom();

  setPrefFunctionAlignment(4); // 2^4 bytes.
}

EVT X86TargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return MVT::i8;
  return VT.changeVectorElementTypeToInteger();
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
1290unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1291 if (Subtarget->is64Bit()) {
1292 // Max of 8 and alignment of type.
1293 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1294 if (TyAlign > 8)
1295 return TyAlign;
1296 return 8;
1297 }
1298
1299 unsigned Align = 4;
1300 if (Subtarget->hasSSE1())
1301 getMaxByValAlign(Ty, Align);
1302 return Align;
1303}
1304
1305/// getOptimalMemOpType - Returns the target-specific optimal type for load
1306/// and store operations as a result of memset, memcpy, and memmove
1307/// lowering. If DstAlign is zero, the destination alignment can
1308/// satisfy any constraint. Similarly, if SrcAlign is zero, there is no
1309/// need to check it against an alignment requirement, probably
1310/// because the source does not need to be loaded. If
1311/// 'IsZeroVal' is true, that means it's safe to return a
1312/// non-scalar-integer type, e.g. empty string source, constant, or loaded
1313/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
1314/// constant so it does not need to be loaded.
1315/// It returns EVT::Other if the type should be determined using generic
1316/// target-independent logic.
1317EVT
1318X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1319 unsigned DstAlign, unsigned SrcAlign,
1320 bool IsZeroVal,
1321 bool MemcpyStrSrc,
1322 MachineFunction &MF) const {
1323 // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
1324 // Linux. This is because the stack realignment code can't handle certain
1325 // cases like PR2962. This should be removed when PR2962 is fixed.
1326 const Function *F = MF.getFunction();
1327 if (IsZeroVal &&
1328 !F->hasFnAttr(Attribute::NoImplicitFloat)) {
1329 if (Size >= 16 &&
1330 (Subtarget->isUnalignedMemAccessFast() ||
1331 ((DstAlign == 0 || DstAlign >= 16) &&
1332 (SrcAlign == 0 || SrcAlign >= 16))) &&
1333 Subtarget->getStackAlignment() >= 16) {
1334 if (Subtarget->getStackAlignment() >= 32) {
1335 if (Subtarget->hasAVX2())
1336 return MVT::v8i32;
1337 if (Subtarget->hasAVX())
1338 return MVT::v8f32;
1339 }
1340 if (Subtarget->hasSSE2())
1341 return MVT::v4i32;
1342 if (Subtarget->hasSSE1())
1343 return MVT::v4f32;
1344 } else if (!MemcpyStrSrc && Size >= 8 &&
1345 !Subtarget->is64Bit() &&
1346 Subtarget->getStackAlignment() >= 8 &&
1347 Subtarget->hasSSE2()) {
1348 // Do not use f64 to lower memcpy if source is string constant. It's
1349 // better to use i32 to avoid the loads.
1350 return MVT::f64;
1351 }
1352 }
1353 if (Subtarget->is64Bit() && Size >= 8)
1354 return MVT::i64;
1355 return MVT::i32;
1356}
1357
1358/// getJumpTableEncoding - Return the entry encoding for a jump table in the
1359/// current function. The returned value is a member of the
1360/// MachineJumpTableInfo::JTEntryKind enum.
1361unsigned X86TargetLowering::getJumpTableEncoding() const {
1362 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1363 // symbol.
1364 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1365 Subtarget->isPICStyleGOT())
1366 return MachineJumpTableInfo::EK_Custom32;
1367
1368 // Otherwise, use the normal jump table encoding heuristics.
1369 return TargetLowering::getJumpTableEncoding();
1370}
1371
1372const MCExpr *
1373X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1374 const MachineBasicBlock *MBB,
1375 unsigned uid, MCContext &Ctx) const {
1376 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1377 Subtarget->isPICStyleGOT());
1378 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1379 // entries.
1380 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1381 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1382}
1383
1384/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1385/// jumptable.
1386SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1387 SelectionDAG &DAG) const {
1388 if (!Subtarget->is64Bit())
1389 // This doesn't have DebugLoc associated with it, but is not really the
1390 // same as a Register.
1391 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
1392 return Table;
1393}
1394
1395/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1396/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1397/// MCExpr.
1398const MCExpr *X86TargetLowering::
1399getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1400 MCContext &Ctx) const {
1401 // X86-64 uses RIP-relative addressing based on the jump table label.
1402 if (Subtarget->isPICStyleRIPRel())
1403 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1404
1405 // Otherwise, the reference is relative to the PIC base.
1406 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1407}
1408
1409// FIXME: Why is this routine here? Move to RegInfo!
1410std::pair<const TargetRegisterClass*, uint8_t>
1411X86TargetLowering::findRepresentativeClass(EVT VT) const {
1412 const TargetRegisterClass *RRC = 0;
1413 uint8_t Cost = 1;
1414 switch (VT.getSimpleVT().SimpleTy) {
1415 default:
1416 return TargetLowering::findRepresentativeClass(VT);
1417 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1418 RRC = Subtarget->is64Bit() ?
1419 (const TargetRegisterClass*)&X86::GR64RegClass : 1420 (const TargetRegisterClass*)&X86::GR32RegClass; 1421 break; 1422 case MVT::x86mmx: 1423 RRC = &X86::VR64RegClass; 1424 break; 1425 case MVT::f32: case MVT::f64: 1426 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1427 case MVT::v4f32: case MVT::v2f64: 1428 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1429 case MVT::v4f64: 1430 RRC = &X86::VR128RegClass; 1431 break; 1432 } 1433 return std::make_pair(RRC, Cost); 1434} 1435 1436bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1437 unsigned &Offset) const { 1438 if (!Subtarget->isTargetLinux()) 1439 return false; 1440 1441 if (Subtarget->is64Bit()) { 1442 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1443 Offset = 0x28; 1444 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1445 AddressSpace = 256; 1446 else 1447 AddressSpace = 257; 1448 } else { 1449 // %gs:0x14 on i386 1450 Offset = 0x14; 1451 AddressSpace = 256; 1452 } 1453 return true; 1454} 1455 1456 1457//===----------------------------------------------------------------------===// 1458// Return Value Calling Convention Implementation 1459//===----------------------------------------------------------------------===// 1460 1461#include "X86GenCallingConv.inc" 1462 1463bool 1464X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1465 MachineFunction &MF, bool isVarArg, 1466 const SmallVectorImpl<ISD::OutputArg> &Outs, 1467 LLVMContext &Context) const { 1468 SmallVector<CCValAssign, 16> RVLocs; 1469 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1470 RVLocs, Context); 1471 return CCInfo.CheckReturn(Outs, RetCC_X86); 1472} 1473 1474SDValue 1475X86TargetLowering::LowerReturn(SDValue Chain, 1476 CallingConv::ID CallConv, bool isVarArg, 1477 const SmallVectorImpl<ISD::OutputArg> &Outs, 1478 const SmallVectorImpl<SDValue> &OutVals, 1479 DebugLoc dl, SelectionDAG &DAG) const { 1480 MachineFunction &MF = DAG.getMachineFunction(); 1481 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1482 1483 SmallVector<CCValAssign, 16> RVLocs; 1484 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1485 RVLocs, *DAG.getContext()); 1486 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1487 1488 // Add the regs to the liveout set for the function. 1489 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1490 for (unsigned i = 0; i != RVLocs.size(); ++i) 1491 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1492 MRI.addLiveOut(RVLocs[i].getLocReg()); 1493 1494 SDValue Flag; 1495 1496 SmallVector<SDValue, 6> RetOps; 1497 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1498 // Operand #1 = Bytes To Pop 1499 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1500 MVT::i16)); 1501 1502 // Copy the result values into the output registers. 
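 // Illustrative sketch (not emitted verbatim): returning a float in XMM0 on
 // an SSE target yields roughly
 //   Chain = CopyToReg Chain, XMM0, Val, Flag
 //   X86ISD::RET_FLAG Chain, BytesToPop, XMM0, Flag
 // with the glue keeping the copy adjacent to the return.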
1503 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1504 CCValAssign &VA = RVLocs[i]; 1505 assert(VA.isRegLoc() && "Can only return in registers!"); 1506 SDValue ValToCopy = OutVals[i]; 1507 EVT ValVT = ValToCopy.getValueType(); 1508 1509 // Promote values to the appropriate types 1510 if (VA.getLocInfo() == CCValAssign::SExt) 1511 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1512 else if (VA.getLocInfo() == CCValAssign::ZExt) 1513 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1514 else if (VA.getLocInfo() == CCValAssign::AExt) 1515 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1516 else if (VA.getLocInfo() == CCValAssign::BCvt) 1517 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1518 1519 // If this is x86-64, and we disabled SSE, we can't return FP values, 1520 // or SSE or MMX vectors. 1521 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1522 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1523 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1524 report_fatal_error("SSE register return with SSE disabled"); 1525 } 1526 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1527 // llvm-gcc has never done it right and no one has noticed, so this 1528 // should be OK for now. 1529 if (ValVT == MVT::f64 && 1530 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1531 report_fatal_error("SSE2 register return with SSE2 disabled"); 1532 1533 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1534 // the RET instruction and handled by the FP Stackifier. 1535 if (VA.getLocReg() == X86::ST0 || 1536 VA.getLocReg() == X86::ST1) { 1537 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1538 // change the value to the FP stack register class. 1539 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1540 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1541 RetOps.push_back(ValToCopy); 1542 // Don't emit a copytoreg. 1543 continue; 1544 } 1545 1546 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1547 // which is returned in RAX / RDX. 1548 if (Subtarget->is64Bit()) { 1549 if (ValVT == MVT::x86mmx) { 1550 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1551 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1552 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1553 ValToCopy); 1554 // If we don't have SSE2 available, convert to v4f32 so the generated 1555 // register is legal. 1556 if (!Subtarget->hasSSE2()) 1557 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1558 } 1559 } 1560 } 1561 1562 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1563 Flag = Chain.getValue(1); 1564 } 1565 1566 // The x86-64 ABI for returning structs by value requires that we copy 1567 // the sret argument into %rax for the return. We saved the argument into 1568 // a virtual register in the entry block, so now we copy the value out 1569 // and into %rax. 
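 // Illustratively, for IR such as
 //   define void @f(%struct.S* sret %agg.result) { ... }
 // the x86-64 ABI requires %rax to hold the %agg.result pointer at the
 // return, even though the function has no IR-level return value.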
1570 if (Subtarget->is64Bit() && 1571 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1572 MachineFunction &MF = DAG.getMachineFunction(); 1573 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1574 unsigned Reg = FuncInfo->getSRetReturnReg(); 1575 assert(Reg && 1576 "SRetReturnReg should have been set in LowerFormalArguments()."); 1577 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1578 1579 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1580 Flag = Chain.getValue(1); 1581 1582 // RAX now acts like a return value. 1583 MRI.addLiveOut(X86::RAX); 1584 } 1585 1586 RetOps[0] = Chain; // Update chain. 1587 1588 // Add the flag if we have it. 1589 if (Flag.getNode()) 1590 RetOps.push_back(Flag); 1591 1592 return DAG.getNode(X86ISD::RET_FLAG, dl, 1593 MVT::Other, &RetOps[0], RetOps.size()); 1594} 1595 1596bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1597 if (N->getNumValues() != 1) 1598 return false; 1599 if (!N->hasNUsesOfValue(1, 0)) 1600 return false; 1601 1602 SDValue TCChain = Chain; 1603 SDNode *Copy = *N->use_begin(); 1604 if (Copy->getOpcode() == ISD::CopyToReg) { 1605 // If the copy has a glue operand, we conservatively assume it isn't safe to 1606 // perform a tail call. 1607 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1608 return false; 1609 TCChain = Copy->getOperand(0); 1610 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1611 return false; 1612 1613 bool HasRet = false; 1614 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1615 UI != UE; ++UI) { 1616 if (UI->getOpcode() != X86ISD::RET_FLAG) 1617 return false; 1618 HasRet = true; 1619 } 1620 1621 if (!HasRet) 1622 return false; 1623 1624 Chain = TCChain; 1625 return true; 1626} 1627 1628EVT 1629X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1630 ISD::NodeType ExtendKind) const { 1631 MVT ReturnMVT; 1632 // TODO: Is this also valid on 32-bit? 1633 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1634 ReturnMVT = MVT::i8; 1635 else 1636 ReturnMVT = MVT::i32; 1637 1638 EVT MinVT = getRegisterType(Context, ReturnMVT); 1639 return VT.bitsLT(MinVT) ? MinVT : VT; 1640} 1641 1642/// LowerCallResult - Lower the result values of a call into the 1643/// appropriate copies out of appropriate physical registers. 1644/// 1645SDValue 1646X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1647 CallingConv::ID CallConv, bool isVarArg, 1648 const SmallVectorImpl<ISD::InputArg> &Ins, 1649 DebugLoc dl, SelectionDAG &DAG, 1650 SmallVectorImpl<SDValue> &InVals) const { 1651 1652 // Assign locations to each value returned by this call. 1653 SmallVector<CCValAssign, 16> RVLocs; 1654 bool Is64Bit = Subtarget->is64Bit(); 1655 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1656 getTargetMachine(), RVLocs, *DAG.getContext()); 1657 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1658 1659 // Copy all of the result registers out of their specified physreg. 
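 // Two cases below, sketched informally: x87 returns in ST0/ST1 become a
 // FpPOP_RETVAL (plus an FP_ROUND when the value is kept in an XMM register),
 // so the fp stack is always popped; everything else is a plain CopyFromReg.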
1660 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1661 CCValAssign &VA = RVLocs[i];
1662 EVT CopyVT = VA.getValVT();
1663
1664 // If this is x86-64, and we disabled SSE, we can't return FP values.
1665 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
1666 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
1667 report_fatal_error("SSE register return with SSE disabled");
1668 }
1669
1670 SDValue Val;
1671
1672 // If this is a call to a function that returns an fp value on the floating
1673 // point stack, we must guarantee the value is popped from the stack, so
1674 // a CopyFromReg is not good enough - the copy instruction may be eliminated
1675 // if the return value is not used. We use the FpPOP_RETVAL instruction
1676 // instead.
1677 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
1678 // If we prefer to use the value in xmm registers, copy it out as f80 and
1679 // use a truncate to move it from fp stack reg to xmm reg.
1680 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1681 SDValue Ops[] = { Chain, InFlag };
1682 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1683 MVT::Other, MVT::Glue, Ops, 2), 1);
1684 Val = Chain.getValue(0);
1685
1686 // Round the f80 to the right size, which also moves it to the appropriate
1687 // xmm register.
1688 if (CopyVT != VA.getValVT())
1689 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1690 // This truncation won't change the value.
1691 DAG.getIntPtrConstant(1));
1692 } else {
1693 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1694 CopyVT, InFlag).getValue(1);
1695 Val = Chain.getValue(0);
1696 }
1697 InFlag = Chain.getValue(2);
1698 InVals.push_back(Val);
1699 }
1700
1701 return Chain;
1702}
1703
1704
1705//===----------------------------------------------------------------------===//
1706// C & StdCall & Fast Calling Convention implementation
1707//===----------------------------------------------------------------------===//
1708// The StdCall calling convention seems to be standard for many Windows API
1709// routines. It differs from the C calling convention just a little: the
1710// callee should clean up the stack, not the caller. Symbols are also
1711// decorated in some fancy way :) It doesn't support any vector arguments.
1712// For info on the fast calling convention see the Fast Calling Convention
1713// (tail call) implementation below.
1714
1715/// callIsStructReturn - Determines whether a call uses struct return
1716/// semantics.
1717enum StructReturnType {
1718 NotStructReturn,
1719 RegStructReturn,
1720 StackStructReturn
1721};
1722static StructReturnType
1723callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1724 if (Outs.empty())
1725 return NotStructReturn;
1726
1727 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
1728 if (!Flags.isSRet())
1729 return NotStructReturn;
1730 if (Flags.isInReg())
1731 return RegStructReturn;
1732 return StackStructReturn;
1733}
1734
1735/// argsAreStructReturn - Determines whether a function uses struct
1736/// return semantics.
1737static StructReturnType
1738argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
1739 if (Ins.empty())
1740 return NotStructReturn;
1741
1742 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
1743 if (!Flags.isSRet())
1744 return NotStructReturn;
1745 if (Flags.isInReg())
1746 return RegStructReturn;
1747 return StackStructReturn;
1748}
1749
1750/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
1751/// specified by "Src" to the address "Dst" with size and alignment
1752/// information specified by the specific parameter attribute. The copy will
1753/// be passed as a byval function parameter.
1754static SDValue
1755CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
1756 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1757 DebugLoc dl) {
1758 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
1759
1760 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
1761 /*isVolatile*/false, /*AlwaysInline=*/true,
1762 MachinePointerInfo(), MachinePointerInfo());
1763}
1764
1765/// IsTailCallConvention - Return true if the calling convention is one that
1766/// supports tail call optimization.
1767static bool IsTailCallConvention(CallingConv::ID CC) {
1768 return (CC == CallingConv::Fast || CC == CallingConv::GHC);
1769}
1770
1771bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
1772 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
1773 return false;
1774
1775 CallSite CS(CI);
1776 CallingConv::ID CalleeCC = CS.getCallingConv();
1777 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1778 return false;
1779
1780 return true;
1781}
1782
1783/// FuncIsMadeTailCallSafe - Return true if the function is being made into
1784/// a tailcall target by changing its ABI.
1785static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
1786 bool GuaranteedTailCallOpt) {
1787 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
1788}
1789
1790SDValue
1791X86TargetLowering::LowerMemArgument(SDValue Chain,
1792 CallingConv::ID CallConv,
1793 const SmallVectorImpl<ISD::InputArg> &Ins,
1794 DebugLoc dl, SelectionDAG &DAG,
1795 const CCValAssign &VA,
1796 MachineFrameInfo *MFI,
1797 unsigned i) const {
1798 // Create the nodes corresponding to a load from this parameter slot.
1799 ISD::ArgFlagsTy Flags = Ins[i].Flags;
1800 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv,
1801 getTargetMachine().Options.GuaranteedTailCallOpt);
1802 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
1803 EVT ValVT;
1804
1805 // If the value is passed by pointer, we have the address passed instead
1806 // of the value itself.
1807 if (VA.getLocInfo() == CCValAssign::Indirect)
1808 ValVT = VA.getLocVT();
1809 else
1810 ValVT = VA.getValVT();
1811
1812 // FIXME: For now, all byval parameter objects are marked mutable. This can
1813 // be changed with more analysis.
1814 // In case of tail call optimization, mark all arguments mutable, since they
1815 // could be overwritten by the lowering of arguments in case of a tail call.
1816 if (Flags.isByVal()) {
1817 unsigned Bytes = Flags.getByValSize();
1818 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
1819 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1820 return DAG.getFrameIndex(FI, getPointerTy()); 1821 } else { 1822 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1823 VA.getLocMemOffset(), isImmutable); 1824 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1825 return DAG.getLoad(ValVT, dl, Chain, FIN, 1826 MachinePointerInfo::getFixedStack(FI), 1827 false, false, false, 0); 1828 } 1829} 1830 1831SDValue 1832X86TargetLowering::LowerFormalArguments(SDValue Chain, 1833 CallingConv::ID CallConv, 1834 bool isVarArg, 1835 const SmallVectorImpl<ISD::InputArg> &Ins, 1836 DebugLoc dl, 1837 SelectionDAG &DAG, 1838 SmallVectorImpl<SDValue> &InVals) 1839 const { 1840 MachineFunction &MF = DAG.getMachineFunction(); 1841 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1842 1843 const Function* Fn = MF.getFunction(); 1844 if (Fn->hasExternalLinkage() && 1845 Subtarget->isTargetCygMing() && 1846 Fn->getName() == "main") 1847 FuncInfo->setForceFramePointer(true); 1848 1849 MachineFrameInfo *MFI = MF.getFrameInfo(); 1850 bool Is64Bit = Subtarget->is64Bit(); 1851 bool IsWindows = Subtarget->isTargetWindows(); 1852 bool IsWin64 = Subtarget->isTargetWin64(); 1853 1854 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1855 "Var args not supported with calling convention fastcc or ghc"); 1856 1857 // Assign locations to all of the incoming arguments. 1858 SmallVector<CCValAssign, 16> ArgLocs; 1859 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1860 ArgLocs, *DAG.getContext()); 1861 1862 // Allocate shadow area for Win64 1863 if (IsWin64) { 1864 CCInfo.AllocateStack(32, 8); 1865 } 1866 1867 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1868 1869 unsigned LastVal = ~0U; 1870 SDValue ArgValue; 1871 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1872 CCValAssign &VA = ArgLocs[i]; 1873 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1874 // places. 1875 assert(VA.getValNo() != LastVal && 1876 "Don't support value assigned to multiple locs yet"); 1877 (void)LastVal; 1878 LastVal = VA.getValNo(); 1879 1880 if (VA.isRegLoc()) { 1881 EVT RegVT = VA.getLocVT(); 1882 const TargetRegisterClass *RC; 1883 if (RegVT == MVT::i32) 1884 RC = &X86::GR32RegClass; 1885 else if (Is64Bit && RegVT == MVT::i64) 1886 RC = &X86::GR64RegClass; 1887 else if (RegVT == MVT::f32) 1888 RC = &X86::FR32RegClass; 1889 else if (RegVT == MVT::f64) 1890 RC = &X86::FR64RegClass; 1891 else if (RegVT.is256BitVector()) 1892 RC = &X86::VR256RegClass; 1893 else if (RegVT.is128BitVector()) 1894 RC = &X86::VR128RegClass; 1895 else if (RegVT == MVT::x86mmx) 1896 RC = &X86::VR64RegClass; 1897 else 1898 llvm_unreachable("Unknown argument type!"); 1899 1900 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1901 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1902 1903 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1904 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1905 // right size. 
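 // For example (illustrative), an i8 signext argument arriving in EAX becomes:
 //   t1 = CopyFromReg EAX:i32
 //   t2 = AssertSext t1, i8
 //   t3 = truncate t2 to i8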
1906 if (VA.getLocInfo() == CCValAssign::SExt) 1907 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1908 DAG.getValueType(VA.getValVT())); 1909 else if (VA.getLocInfo() == CCValAssign::ZExt) 1910 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1911 DAG.getValueType(VA.getValVT())); 1912 else if (VA.getLocInfo() == CCValAssign::BCvt) 1913 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1914 1915 if (VA.isExtInLoc()) { 1916 // Handle MMX values passed in XMM regs. 1917 if (RegVT.isVector()) { 1918 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1919 ArgValue); 1920 } else 1921 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1922 } 1923 } else { 1924 assert(VA.isMemLoc()); 1925 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1926 } 1927 1928 // If value is passed via pointer - do a load. 1929 if (VA.getLocInfo() == CCValAssign::Indirect) 1930 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1931 MachinePointerInfo(), false, false, false, 0); 1932 1933 InVals.push_back(ArgValue); 1934 } 1935 1936 // The x86-64 ABI for returning structs by value requires that we copy 1937 // the sret argument into %rax for the return. Save the argument into 1938 // a virtual register so that we can access it from the return points. 1939 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1940 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1941 unsigned Reg = FuncInfo->getSRetReturnReg(); 1942 if (!Reg) { 1943 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1944 FuncInfo->setSRetReturnReg(Reg); 1945 } 1946 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1947 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1948 } 1949 1950 unsigned StackSize = CCInfo.getNextStackOffset(); 1951 // Align stack specially for tail calls. 1952 if (FuncIsMadeTailCallSafe(CallConv, 1953 MF.getTarget().Options.GuaranteedTailCallOpt)) 1954 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1955 1956 // If the function takes variable number of arguments, make a frame index for 1957 // the start of the first vararg value... for expansion of llvm.va_start. 1958 if (isVarArg) { 1959 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1960 CallConv != CallingConv::X86_ThisCall)) { 1961 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1962 } 1963 if (Is64Bit) { 1964 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1965 1966 // FIXME: We should really autogenerate these arrays 1967 static const uint16_t GPR64ArgRegsWin64[] = { 1968 X86::RCX, X86::RDX, X86::R8, X86::R9 1969 }; 1970 static const uint16_t GPR64ArgRegs64Bit[] = { 1971 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1972 }; 1973 static const uint16_t XMMArgRegs64Bit[] = { 1974 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1975 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1976 }; 1977 const uint16_t *GPR64ArgRegs; 1978 unsigned NumXMMRegs = 0; 1979 1980 if (IsWin64) { 1981 // The XMM registers which might contain var arg parameters are shadowed 1982 // in their paired GPR. So we only need to save the GPR to their home 1983 // slots. 
1984 TotalNumIntRegs = 4;
1985 GPR64ArgRegs = GPR64ArgRegsWin64;
1986 } else {
1987 TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
1988 GPR64ArgRegs = GPR64ArgRegs64Bit;
1989
1990 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit,
1991 TotalNumXMMRegs);
1992 }
1993 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
1994 TotalNumIntRegs);
1995
1996 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
1997 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
1998 "SSE register cannot be used when SSE is disabled!");
1999 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
2000 NoImplicitFloatOps) &&
2001 "SSE register cannot be used when SSE is disabled!");
2002 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2003 !Subtarget->hasSSE1())
2004 // Kernel mode asks for SSE to be disabled, so don't push the XMM
2005 // registers on the stack.
2006 TotalNumXMMRegs = 0;
2007
2008 if (IsWin64) {
2009 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
2010 // Get to the caller-allocated home save location. Add 8 to account
2011 // for the return address.
2012 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2013 FuncInfo->setRegSaveFrameIndex(
2014 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2015 // Fixup to set vararg frame on shadow area (4 x i64).
2016 if (NumIntRegs < 4)
2017 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2018 } else {
2019 // For X86-64, if there are vararg parameters that are passed via
2020 // registers, then we must store them to their spots on the stack so
2021 // they may be loaded by dereferencing the result of va_next.
2022 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2023 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
2024 FuncInfo->setRegSaveFrameIndex(
2025 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
2026 false));
2027 }
2028
2029 // Store the integer parameter registers.
2030 SmallVector<SDValue, 8> MemOps;
2031 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2032 getPointerTy());
2033 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2034 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
2035 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2036 DAG.getIntPtrConstant(Offset));
2037 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
2038 &X86::GR64RegClass);
2039 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
2040 SDValue Store =
2041 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2042 MachinePointerInfo::getFixedStack(
2043 FuncInfo->getRegSaveFrameIndex(), Offset),
2044 false, false, 0);
2045 MemOps.push_back(Store);
2046 Offset += 8;
2047 }
2048
2049 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) {
2050 // Now store the XMM (fp + vector) parameter registers.
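 // Roughly speaking, these stores are bundled into the single
 // X86ISD::VASTART_SAVE_XMM_REGS node built below; its expansion tests %al
 // (the caller's SSE register count, see the AMD64 varargs note in
 // LowerCall) and skips the XMM stores when %al is zero.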
2051 SmallVector<SDValue, 11> SaveXMMOps; 2052 SaveXMMOps.push_back(Chain); 2053 2054 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2055 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2056 SaveXMMOps.push_back(ALVal); 2057 2058 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2059 FuncInfo->getRegSaveFrameIndex())); 2060 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2061 FuncInfo->getVarArgsFPOffset())); 2062 2063 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2064 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2065 &X86::VR128RegClass); 2066 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2067 SaveXMMOps.push_back(Val); 2068 } 2069 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2070 MVT::Other, 2071 &SaveXMMOps[0], SaveXMMOps.size())); 2072 } 2073 2074 if (!MemOps.empty()) 2075 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2076 &MemOps[0], MemOps.size()); 2077 } 2078 } 2079 2080 // Some CCs need callee pop. 2081 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2082 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2083 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2084 } else { 2085 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2086 // If this is an sret function, the return should pop the hidden pointer. 2087 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2088 argsAreStructReturn(Ins) == StackStructReturn) 2089 FuncInfo->setBytesToPopOnReturn(4); 2090 } 2091 2092 if (!Is64Bit) { 2093 // RegSaveFrameIndex is X86-64 only. 2094 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2095 if (CallConv == CallingConv::X86_FastCall || 2096 CallConv == CallingConv::X86_ThisCall) 2097 // fastcc functions can't have varargs. 2098 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2099 } 2100 2101 FuncInfo->setArgumentStackSize(StackSize); 2102 2103 return Chain; 2104} 2105 2106SDValue 2107X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2108 SDValue StackPtr, SDValue Arg, 2109 DebugLoc dl, SelectionDAG &DAG, 2110 const CCValAssign &VA, 2111 ISD::ArgFlagsTy Flags) const { 2112 unsigned LocMemOffset = VA.getLocMemOffset(); 2113 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2114 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2115 if (Flags.isByVal()) 2116 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2117 2118 return DAG.getStore(Chain, dl, Arg, PtrOff, 2119 MachinePointerInfo::getStack(LocMemOffset), 2120 false, false, 0); 2121} 2122 2123/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2124/// optimization is performed and it is required. 2125SDValue 2126X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2127 SDValue &OutRetAddr, SDValue Chain, 2128 bool IsTailCall, bool Is64Bit, 2129 int FPDiff, DebugLoc dl) const { 2130 // Adjust the Return address stack slot. 2131 EVT VT = getPointerTy(); 2132 OutRetAddr = getReturnAddressFrameIndex(DAG); 2133 2134 // Load the "old" Return address. 2135 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2136 false, false, false, 0); 2137 return SDValue(OutRetAddr.getNode(), 1); 2138} 2139 2140/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2141/// optimization is performed and it is required (FPDiff!=0). 
2142static SDValue
2143EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2144 SDValue Chain, SDValue RetAddrFrIdx,
2145 bool Is64Bit, int FPDiff, DebugLoc dl) {
2146 // Store the return address to the appropriate stack slot.
2147 if (!FPDiff) return Chain;
2148 // Calculate the new stack slot for the return address.
2149 int SlotSize = Is64Bit ? 8 : 4;
2150 int NewReturnAddrFI =
2151 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
2152 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2153 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
2154 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2155 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2156 false, false, 0);
2157 return Chain;
2158}
2159
2160SDValue
2161X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2162 SmallVectorImpl<SDValue> &InVals) const {
2163 SelectionDAG &DAG = CLI.DAG;
2164 DebugLoc &dl = CLI.DL;
2165 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2166 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2167 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2168 SDValue Chain = CLI.Chain;
2169 SDValue Callee = CLI.Callee;
2170 CallingConv::ID CallConv = CLI.CallConv;
2171 bool &isTailCall = CLI.IsTailCall;
2172 bool isVarArg = CLI.IsVarArg;
2173
2174 MachineFunction &MF = DAG.getMachineFunction();
2175 bool Is64Bit = Subtarget->is64Bit();
2176 bool IsWin64 = Subtarget->isTargetWin64();
2177 bool IsWindows = Subtarget->isTargetWindows();
2178 StructReturnType SR = callIsStructReturn(Outs);
2179 bool IsSibcall = false;
2180
2181 if (MF.getTarget().Options.DisableTailCalls)
2182 isTailCall = false;
2183
2184 if (isTailCall) {
2185 // Check if it's really possible to do a tail call.
2186 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2187 isVarArg, SR != NotStructReturn,
2188 MF.getFunction()->hasStructRetAttr(),
2189 Outs, OutVals, Ins, DAG);
2190
2191 // Sibcalls are automatically detected tailcalls which do not require
2192 // ABI changes.
2193 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2194 IsSibcall = true;
2195
2196 if (isTailCall)
2197 ++NumTailCalls;
2198 }
2199
2200 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2201 "Var args not supported with calling convention fastcc or ghc");
2202
2203 // Analyze operands of the call, assigning locations to each operand.
2204 SmallVector<CCValAssign, 16> ArgLocs;
2205 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
2206 ArgLocs, *DAG.getContext());
2207
2208 // Allocate shadow area for Win64.
2209 if (IsWin64) {
2210 CCInfo.AllocateStack(32, 8);
2211 }
2212
2213 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2214
2215 // Get a count of how many bytes are to be pushed on the stack.
2216 unsigned NumBytes = CCInfo.getNextStackOffset();
2217 if (IsSibcall)
2218 // This is a sibcall. The memory operands are available in the caller's
2219 // own stack.
2220 NumBytes = 0;
2221 else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
2222 IsTailCallConvention(CallConv))
2223 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2224
2225 int FPDiff = 0;
2226 if (isTailCall && !IsSibcall) {
2227 // Lower arguments at fp - stackoffset + fpdiff.
2228 unsigned NumBytesCallerPushed =
2229 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
2230 FPDiff = NumBytesCallerPushed - NumBytes;
2231
2232 // Record the movement delta of the return address stack slot, but only
2233 // if this delta exceeds the previously recorded one.
2234 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
2235 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
2236 }
2237
2238 if (!IsSibcall)
2239 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2240
2241 SDValue RetAddrFrIdx;
2242 // Load return address for tail calls.
2243 if (isTailCall && FPDiff)
2244 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2245 Is64Bit, FPDiff, dl);
2246
2247 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2248 SmallVector<SDValue, 8> MemOpChains;
2249 SDValue StackPtr;
2250
2251 // Walk the register/memloc assignments, inserting copies/loads. In the case
2252 // of tail call optimization, arguments are handled later.
2253 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2254 CCValAssign &VA = ArgLocs[i];
2255 EVT RegVT = VA.getLocVT();
2256 SDValue Arg = OutVals[i];
2257 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2258 bool isByVal = Flags.isByVal();
2259
2260 // Promote the value if needed.
2261 switch (VA.getLocInfo()) {
2262 default: llvm_unreachable("Unknown loc info!");
2263 case CCValAssign::Full: break;
2264 case CCValAssign::SExt:
2265 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2266 break;
2267 case CCValAssign::ZExt:
2268 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2269 break;
2270 case CCValAssign::AExt:
2271 if (RegVT.is128BitVector()) {
2272 // Special case: passing MMX values in XMM registers.
2273 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2274 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2275 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2276 } else
2277 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2278 break;
2279 case CCValAssign::BCvt:
2280 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2281 break;
2282 case CCValAssign::Indirect: {
2283 // Store the argument.
2284 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2285 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2286 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2287 MachinePointerInfo::getFixedStack(FI),
2288 false, false, 0);
2289 Arg = SpillSlot;
2290 break;
2291 }
2292 }
2293
2294 if (VA.isRegLoc()) {
2295 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2296 if (isVarArg && IsWin64) {
2297 // The Win64 ABI requires an argument XMM reg to be copied to the
2298 // corresponding shadow reg if the callee is a varargs function.
2299 unsigned ShadowReg = 0;
2300 switch (VA.getLocReg()) {
2301 case X86::XMM0: ShadowReg = X86::RCX; break;
2302 case X86::XMM1: ShadowReg = X86::RDX; break;
2303 case X86::XMM2: ShadowReg = X86::R8; break;
2304 case X86::XMM3: ShadowReg = X86::R9; break;
2305 }
2306 if (ShadowReg)
2307 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2308 }
2309 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2310 assert(VA.isMemLoc());
2311 if (StackPtr.getNode() == 0)
2312 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
2313 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2314 dl, DAG, VA, Flags));
2315 }
2316 }
2317
2318 if (!MemOpChains.empty())
2319 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2320 &MemOpChains[0], MemOpChains.size());
2321
2322 if (Subtarget->isPICStyleGOT()) {
2323 // ELF PIC requires the GOT address in the EBX register before function
2324 // calls made via the PLT.
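 // Illustratively, a 32-bit PIC call through the PLT expects
 //   ebx = &_GLOBAL_OFFSET_TABLE_
 //   call foo@PLT
 // which is why EBX is passed as an extra register in the non-tail-call case.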
2325 if (!isTailCall) {
2326 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2327 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy())));
2328 } else {
2329 // If we are tail calling and generating PIC/GOT style code, load the
2330 // address of the callee into ECX. The value in ECX is used as the target
2331 // of the tail jump. This is done to circumvent the ebx/callee-saved
2332 // problem for tail calls on PIC/GOT architectures. Normally we would just
2333 // put the address of the GOT into ebx and then call target@PLT. But for
2334 // tail calls ebx would be restored (since ebx is callee saved) before
2335 // jumping to the target@PLT.
2336
2337 // Note: The actual moving to ECX is done further down.
2338 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2339 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2340 !G->getGlobal()->hasProtectedVisibility())
2341 Callee = LowerGlobalAddress(Callee, DAG);
2342 else if (isa<ExternalSymbolSDNode>(Callee))
2343 Callee = LowerExternalSymbol(Callee, DAG);
2344 }
2345 }
2346
2347 if (Is64Bit && isVarArg && !IsWin64) {
2348 // From the AMD64 ABI document:
2349 // For calls that may call functions that use varargs or stdargs
2350 // (prototype-less calls or calls to functions containing ellipsis (...) in
2351 // the declaration) %al is used as a hidden argument to specify the number
2352 // of SSE registers used. The contents of %al do not need to match exactly
2353 // the number of registers, but must be an upper bound on the number of SSE
2354 // registers used and is in the range 0 - 8 inclusive.
2355
2356 // Count the number of XMM registers allocated.
2357 static const uint16_t XMMArgRegs[] = {
2358 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2359 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2360 };
2361 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2362 assert((Subtarget->hasSSE1() || !NumXMMRegs)
2363 && "SSE registers cannot be used when SSE is disabled");
2364
2365 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
2366 DAG.getConstant(NumXMMRegs, MVT::i8)));
2367 }
2368
2369 // For tail calls lower the arguments to the 'real' stack slot.
2370 if (isTailCall) {
2371 // Force all the incoming stack arguments to be loaded from the stack
2372 // before any new outgoing arguments are stored to the stack, because the
2373 // outgoing stack slots may alias the incoming argument stack slots, and
2374 // the alias isn't otherwise explicit. This is slightly more conservative
2375 // than necessary, because it means that each store effectively depends
2376 // on every argument instead of just those arguments it would clobber.
2377 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
2378
2379 SmallVector<SDValue, 8> MemOpChains2;
2380 SDValue FIN;
2381 int FI = 0;
2382 if (getTargetMachine().Options.GuaranteedTailCallOpt) {
2383 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2384 CCValAssign &VA = ArgLocs[i];
2385 if (VA.isRegLoc())
2386 continue;
2387 assert(VA.isMemLoc());
2388 SDValue Arg = OutVals[i];
2389 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2390 // Create frame index.
2391 int32_t Offset = VA.getLocMemOffset()+FPDiff;
2392 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2393 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
2394 FIN = DAG.getFrameIndex(FI, getPointerTy());
2395
2396 if (Flags.isByVal()) {
2397 // Copy relative to framepointer.
2398 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
2399 if (StackPtr.getNode() == 0)
2400 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
2401 getPointerTy());
2402 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
2403
2404 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
2405 ArgChain,
2406 Flags, DAG, dl));
2407 } else {
2408 // Store relative to framepointer.
2409 MemOpChains2.push_back(
2410 DAG.getStore(ArgChain, dl, Arg, FIN,
2411 MachinePointerInfo::getFixedStack(FI),
2412 false, false, 0));
2413 }
2414 }
2415 }
2416
2417 if (!MemOpChains2.empty())
2418 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2419 &MemOpChains2[0], MemOpChains2.size());
2420
2421 // Store the return address to the appropriate stack slot.
2422 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
2423 FPDiff, dl);
2424 }
2425
2426 // Build a sequence of copy-to-reg nodes chained together with token chain
2427 // and flag operands which copy the outgoing args into registers.
2428 SDValue InFlag;
2429 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2430 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2431 RegsToPass[i].second, InFlag);
2432 InFlag = Chain.getValue(1);
2433 }
2434
2435 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
2436 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
2437 // In the 64-bit large code model, we have to make all calls
2438 // through a register, since the call instruction's 32-bit
2439 // pc-relative offset may not be large enough to hold the whole
2440 // address.
2441 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2442 // If the callee is a GlobalAddress node (quite common, every direct call
2443 // is) turn it into a TargetGlobalAddress node so that legalize doesn't
2444 // hack it.
2445
2446 // We should use extra load for direct calls to dllimported functions in
2447 // non-JIT mode.
2448 const GlobalValue *GV = G->getGlobal();
2449 if (!GV->hasDLLImportLinkage()) {
2450 unsigned char OpFlags = 0;
2451 bool ExtraLoad = false;
2452 unsigned WrapperKind = ISD::DELETED_NODE;
2453
2454 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2455 // external symbols must go through the PLT in PIC mode. If the symbol
2456 // has hidden or protected visibility, or if it is static or local, then
2457 // we don't need to use the PLT - we can directly call it.
2458 if (Subtarget->isTargetELF() &&
2459 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2460 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2461 OpFlags = X86II::MO_PLT;
2462 } else if (Subtarget->isPICStyleStubAny() &&
2463 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2464 (!Subtarget->getTargetTriple().isMacOSX() ||
2465 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2466 // PC-relative references to external symbols should go through $stub,
2467 // unless we're building with the Leopard linker or later, which
2468 // automatically synthesizes these stubs.
2469 OpFlags = X86II::MO_DARWIN_STUB;
2470 } else if (Subtarget->isPICStyleRIPRel() &&
2471 isa<Function>(GV) &&
2472 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
2473 // If the function is marked as non-lazy, generate an indirect call
2474 // which loads from the GOT directly. This avoids runtime overhead
2475 // at the cost of eager binding (and one extra byte of encoding).
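 // Roughly, instead of "call foo" this emits a load of the GOT entry
 //   foo@GOTPCREL(%rip)
 // followed by an indirect call through the loaded pointer.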
2476 OpFlags = X86II::MO_GOTPCREL; 2477 WrapperKind = X86ISD::WrapperRIP; 2478 ExtraLoad = true; 2479 } 2480 2481 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2482 G->getOffset(), OpFlags); 2483 2484 // Add a wrapper if needed. 2485 if (WrapperKind != ISD::DELETED_NODE) 2486 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2487 // Add extra indirection if needed. 2488 if (ExtraLoad) 2489 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2490 MachinePointerInfo::getGOT(), 2491 false, false, false, 0); 2492 } 2493 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2494 unsigned char OpFlags = 0; 2495 2496 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2497 // external symbols should go through the PLT. 2498 if (Subtarget->isTargetELF() && 2499 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2500 OpFlags = X86II::MO_PLT; 2501 } else if (Subtarget->isPICStyleStubAny() && 2502 (!Subtarget->getTargetTriple().isMacOSX() || 2503 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2504 // PC-relative references to external symbols should go through $stub, 2505 // unless we're building with the leopard linker or later, which 2506 // automatically synthesizes these stubs. 2507 OpFlags = X86II::MO_DARWIN_STUB; 2508 } 2509 2510 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2511 OpFlags); 2512 } 2513 2514 // Returns a chain & a flag for retval copy to use. 2515 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2516 SmallVector<SDValue, 8> Ops; 2517 2518 if (!IsSibcall && isTailCall) { 2519 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2520 DAG.getIntPtrConstant(0, true), InFlag); 2521 InFlag = Chain.getValue(1); 2522 } 2523 2524 Ops.push_back(Chain); 2525 Ops.push_back(Callee); 2526 2527 if (isTailCall) 2528 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2529 2530 // Add argument registers to the end of the list so that they are known live 2531 // into the call. 2532 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2533 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2534 RegsToPass[i].second.getValueType())); 2535 2536 // Add a register mask operand representing the call-preserved registers. 2537 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2538 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2539 assert(Mask && "Missing call preserved mask for calling convention"); 2540 Ops.push_back(DAG.getRegisterMask(Mask)); 2541 2542 if (InFlag.getNode()) 2543 Ops.push_back(InFlag); 2544 2545 if (isTailCall) { 2546 // We used to do: 2547 //// If this is the first return lowered for this function, add the regs 2548 //// to the liveout set for the function. 2549 // This isn't right, although it's probably harmless on x86; liveouts 2550 // should be computed from returns not tail calls. Consider a void 2551 // function making a tail call to a function returning int. 2552 return DAG.getNode(X86ISD::TC_RETURN, dl, 2553 NodeTys, &Ops[0], Ops.size()); 2554 } 2555 2556 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2557 InFlag = Chain.getValue(1); 2558 2559 // Create the CALLSEQ_END node. 
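 // Illustrative cases for the logic below: an x86_stdcall callee pops all of
 // its stack arguments; on most 32-bit targets other than MSVC, a callee
 // returning through a hidden sret pointer pops those 4 bytes; a plain ccc
 // callee pops nothing.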
2560 unsigned NumBytesForCalleeToPush;
2561 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2562 getTargetMachine().Options.GuaranteedTailCallOpt))
2563 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
2564 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2565 SR == StackStructReturn)
2566 // If this is a call to a struct-return function, the callee
2567 // pops the hidden struct pointer, so we have to push it back.
2568 // This is common for Darwin/X86, Linux & Mingw32 targets.
2569 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
2570 NumBytesForCalleeToPush = 4;
2571 else
2572 NumBytesForCalleeToPush = 0; // Callee pops nothing.
2573
2574 // Returns a flag for retval copy to use.
2575 if (!IsSibcall) {
2576 Chain = DAG.getCALLSEQ_END(Chain,
2577 DAG.getIntPtrConstant(NumBytes, true),
2578 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2579 true),
2580 InFlag);
2581 InFlag = Chain.getValue(1);
2582 }
2583
2584 // Handle result values, copying them out of physregs into vregs that we
2585 // return.
2586 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2587 Ins, dl, DAG, InVals);
2588}
2589
2590
2591//===----------------------------------------------------------------------===//
2592// Fast Calling Convention (tail call) implementation
2593//===----------------------------------------------------------------------===//
2594
2595// Like the stdcall convention, the callee cleans up the arguments, except
2596// that ECX is reserved for storing the tail-called function's address. Only
2597// 2 registers are free for argument passing (inreg). Tail call optimization
2598// is performed provided:
2599// * tailcallopt is enabled
2600// * caller/callee are fastcc
2601// On the X86_64 architecture with GOT-style position independent code, only
2602// local (within-module) calls are supported at the moment.
2603// To keep the stack aligned according to the platform ABI, the function
2604// GetAlignedArgumentStackSize ensures that the argument delta is always a
2605// multiple of the stack alignment. (Dynamic linkers need this - Darwin's
2606// dyld, for example.) If the tail-called callee has more arguments than the
2607// caller, the caller needs to make sure that there is room to move the
2608// RETADDR to. This is achieved by reserving an area the size of the argument
2609// delta right after the original RETADDR, but before the saved framepointer
2610// or the spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
2611// stack layout:
2612// arg1
2613// arg2
2614// RETADDR
2615// [ new RETADDR
2616// move area ]
2617// (possible EBP)
2618// ESI
2619// EDI
2620// local1 ..
2621
2622/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12,
2623/// for a 16-byte alignment requirement.
2624unsigned
2625X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2626 SelectionDAG &DAG) const {
2627 MachineFunction &MF = DAG.getMachineFunction();
2628 const TargetMachine &TM = MF.getTarget();
2629 const TargetFrameLowering &TFI = *TM.getFrameLowering();
2630 unsigned StackAlignment = TFI.getStackAlignment();
2631 uint64_t AlignMask = StackAlignment - 1;
2632 int64_t Offset = StackSize;
2633 uint64_t SlotSize = TD->getPointerSize();
2634 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
2635 // The remainder is at most 12, so just add the difference.
2636 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
2637 } else {
2638 // Mask out the lower bits, then add one stack alignment plus the 12 bytes.
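 // Worked example (illustrative), with StackAlignment = 16 and SlotSize = 4:
 // StackSize = 30 gives (30 & 15) = 14 > 12, so
 // Offset = (30 & ~15) + 16 + 12 = 44, and 44 + 4 (RETADDR) = 48 = 3 * 16.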
2639 Offset = ((~AlignMask) & Offset) + StackAlignment + 2640 (StackAlignment-SlotSize); 2641 } 2642 return Offset; 2643} 2644 2645/// MatchingStackOffset - Return true if the given stack call argument is 2646/// already available in the same position (relatively) of the caller's 2647/// incoming argument stack. 2648static 2649bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2650 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2651 const X86InstrInfo *TII) { 2652 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2653 int FI = INT_MAX; 2654 if (Arg.getOpcode() == ISD::CopyFromReg) { 2655 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2656 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2657 return false; 2658 MachineInstr *Def = MRI->getVRegDef(VR); 2659 if (!Def) 2660 return false; 2661 if (!Flags.isByVal()) { 2662 if (!TII->isLoadFromStackSlot(Def, FI)) 2663 return false; 2664 } else { 2665 unsigned Opcode = Def->getOpcode(); 2666 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2667 Def->getOperand(1).isFI()) { 2668 FI = Def->getOperand(1).getIndex(); 2669 Bytes = Flags.getByValSize(); 2670 } else 2671 return false; 2672 } 2673 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2674 if (Flags.isByVal()) 2675 // ByVal argument is passed in as a pointer but it's now being 2676 // dereferenced. e.g. 2677 // define @foo(%struct.X* %A) { 2678 // tail call @bar(%struct.X* byval %A) 2679 // } 2680 return false; 2681 SDValue Ptr = Ld->getBasePtr(); 2682 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2683 if (!FINode) 2684 return false; 2685 FI = FINode->getIndex(); 2686 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2687 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2688 FI = FINode->getIndex(); 2689 Bytes = Flags.getByValSize(); 2690 } else 2691 return false; 2692 2693 assert(FI != INT_MAX); 2694 if (!MFI->isFixedObjectIndex(FI)) 2695 return false; 2696 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2697} 2698 2699/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2700/// for tail call optimization. Targets which want to do tail call 2701/// optimization should implement this function. 2702bool 2703X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2704 CallingConv::ID CalleeCC, 2705 bool isVarArg, 2706 bool isCalleeStructRet, 2707 bool isCallerStructRet, 2708 const SmallVectorImpl<ISD::OutputArg> &Outs, 2709 const SmallVectorImpl<SDValue> &OutVals, 2710 const SmallVectorImpl<ISD::InputArg> &Ins, 2711 SelectionDAG& DAG) const { 2712 if (!IsTailCallConvention(CalleeCC) && 2713 CalleeCC != CallingConv::C) 2714 return false; 2715 2716 // If -tailcallopt is specified, make fastcc functions tail-callable. 2717 const MachineFunction &MF = DAG.getMachineFunction(); 2718 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2719 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2720 bool CCMatch = CallerCC == CalleeCC; 2721 2722 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2723 if (IsTailCallConvention(CalleeCC) && CCMatch) 2724 return true; 2725 return false; 2726 } 2727 2728 // Look for obvious safe cases to perform tail call optimization that do not 2729 // require ABI changes. This is what gcc calls sibcall. 2730 2731 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2732 // emit a special epilogue. 
2733 if (RegInfo->needsStackRealignment(MF)) 2734 return false; 2735 2736 // Also avoid sibcall optimization if either caller or callee uses struct 2737 // return semantics. 2738 if (isCalleeStructRet || isCallerStructRet) 2739 return false; 2740 2741 // An stdcall caller is expected to clean up its arguments; the callee 2742 // isn't going to do that. 2743 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2744 return false; 2745 2746 // Do not sibcall optimize vararg calls unless all arguments are passed via 2747 // registers. 2748 if (isVarArg && !Outs.empty()) { 2749 2750 // Optimizing for varargs on Win64 is unlikely to be safe without 2751 // additional testing. 2752 if (Subtarget->isTargetWin64()) 2753 return false; 2754 2755 SmallVector<CCValAssign, 16> ArgLocs; 2756 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2757 getTargetMachine(), ArgLocs, *DAG.getContext()); 2758 2759 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2760 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2761 if (!ArgLocs[i].isRegLoc()) 2762 return false; 2763 } 2764 2765 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2766 // stack. Therefore, if it's not used by the call it is not safe to optimize 2767 // this into a sibcall. 2768 bool Unused = false; 2769 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2770 if (!Ins[i].Used) { 2771 Unused = true; 2772 break; 2773 } 2774 } 2775 if (Unused) { 2776 SmallVector<CCValAssign, 16> RVLocs; 2777 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2778 getTargetMachine(), RVLocs, *DAG.getContext()); 2779 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2780 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2781 CCValAssign &VA = RVLocs[i]; 2782 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2783 return false; 2784 } 2785 } 2786 2787 // If the calling conventions do not match, then we'd better make sure the 2788 // results are returned in the same way as what the caller expects. 2789 if (!CCMatch) { 2790 SmallVector<CCValAssign, 16> RVLocs1; 2791 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2792 getTargetMachine(), RVLocs1, *DAG.getContext()); 2793 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2794 2795 SmallVector<CCValAssign, 16> RVLocs2; 2796 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2797 getTargetMachine(), RVLocs2, *DAG.getContext()); 2798 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2799 2800 if (RVLocs1.size() != RVLocs2.size()) 2801 return false; 2802 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2803 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2804 return false; 2805 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2806 return false; 2807 if (RVLocs1[i].isRegLoc()) { 2808 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2809 return false; 2810 } else { 2811 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2812 return false; 2813 } 2814 } 2815 } 2816 2817 // If the callee takes no arguments then go on to check the results of the 2818 // call. 2819 if (!Outs.empty()) { 2820 // Check if stack adjustment is needed. For now, do not do this if any 2821 // argument is passed on the stack. 
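    // (A sibcall reuses the caller's stack frame, so a stack-passed argument
    // is only safe when it already sits in the matching caller slot; that is
    // exactly what MatchingStackOffset verifies below.)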
2822 SmallVector<CCValAssign, 16> ArgLocs; 2823 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2824 getTargetMachine(), ArgLocs, *DAG.getContext()); 2825 2826 // Allocate shadow area for Win64 2827 if (Subtarget->isTargetWin64()) { 2828 CCInfo.AllocateStack(32, 8); 2829 } 2830 2831 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2832 if (CCInfo.getNextStackOffset()) { 2833 MachineFunction &MF = DAG.getMachineFunction(); 2834 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2835 return false; 2836 2837 // Check if the arguments are already laid out in the right way as 2838 // the caller's fixed stack objects. 2839 MachineFrameInfo *MFI = MF.getFrameInfo(); 2840 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2841 const X86InstrInfo *TII = 2842 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2843 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2844 CCValAssign &VA = ArgLocs[i]; 2845 SDValue Arg = OutVals[i]; 2846 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2847 if (VA.getLocInfo() == CCValAssign::Indirect) 2848 return false; 2849 if (!VA.isRegLoc()) { 2850 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2851 MFI, MRI, TII)) 2852 return false; 2853 } 2854 } 2855 } 2856 2857 // If the tailcall address may be in a register, then make sure it's 2858 // possible to register allocate for it. In 32-bit, the call address can 2859 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2860 // callee-saved registers are restored. These happen to be the same 2861 // registers used to pass 'inreg' arguments so watch out for those. 2862 if (!Subtarget->is64Bit() && 2863 !isa<GlobalAddressSDNode>(Callee) && 2864 !isa<ExternalSymbolSDNode>(Callee)) { 2865 unsigned NumInRegs = 0; 2866 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2867 CCValAssign &VA = ArgLocs[i]; 2868 if (!VA.isRegLoc()) 2869 continue; 2870 unsigned Reg = VA.getLocReg(); 2871 switch (Reg) { 2872 default: break; 2873 case X86::EAX: case X86::EDX: case X86::ECX: 2874 if (++NumInRegs == 3) 2875 return false; 2876 break; 2877 } 2878 } 2879 } 2880 } 2881 2882 return true; 2883} 2884 2885FastISel * 2886X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 2887 const TargetLibraryInfo *libInfo) const { 2888 return X86::createFastISel(funcInfo, libInfo); 2889} 2890 2891 2892//===----------------------------------------------------------------------===// 2893// Other Lowering Hooks 2894//===----------------------------------------------------------------------===// 2895 2896static bool MayFoldLoad(SDValue Op) { 2897 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2898} 2899 2900static bool MayFoldIntoStore(SDValue Op) { 2901 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2902} 2903 2904static bool isTargetShuffle(unsigned Opcode) { 2905 switch(Opcode) { 2906 default: return false; 2907 case X86ISD::PSHUFD: 2908 case X86ISD::PSHUFHW: 2909 case X86ISD::PSHUFLW: 2910 case X86ISD::SHUFP: 2911 case X86ISD::PALIGN: 2912 case X86ISD::MOVLHPS: 2913 case X86ISD::MOVLHPD: 2914 case X86ISD::MOVHLPS: 2915 case X86ISD::MOVLPS: 2916 case X86ISD::MOVLPD: 2917 case X86ISD::MOVSHDUP: 2918 case X86ISD::MOVSLDUP: 2919 case X86ISD::MOVDDUP: 2920 case X86ISD::MOVSS: 2921 case X86ISD::MOVSD: 2922 case X86ISD::UNPCKL: 2923 case X86ISD::UNPCKH: 2924 case X86ISD::VPERMILP: 2925 case X86ISD::VPERM2X128: 2926 case X86ISD::VPERMI: 2927 return true; 2928 } 2929} 2930 2931static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2932 
                                           SDValue V1, SelectionDAG &DAG) {
2933  switch(Opc) {
2934  default: llvm_unreachable("Unknown x86 shuffle node");
2935  case X86ISD::MOVSHDUP:
2936  case X86ISD::MOVSLDUP:
2937  case X86ISD::MOVDDUP:
2938    return DAG.getNode(Opc, dl, VT, V1);
2939  }
2940}
2941
2942static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2943                                    SDValue V1, unsigned TargetMask,
2944                                    SelectionDAG &DAG) {
2945  switch(Opc) {
2946  default: llvm_unreachable("Unknown x86 shuffle node");
2947  case X86ISD::PSHUFD:
2948  case X86ISD::PSHUFHW:
2949  case X86ISD::PSHUFLW:
2950  case X86ISD::VPERMILP:
2951  case X86ISD::VPERMI:
2952    return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
2953  }
2954}
2955
2956static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2957                                    SDValue V1, SDValue V2, unsigned TargetMask,
2958                                    SelectionDAG &DAG) {
2959  switch(Opc) {
2960  default: llvm_unreachable("Unknown x86 shuffle node");
2961  case X86ISD::PALIGN:
2962  case X86ISD::SHUFP:
2963  case X86ISD::VPERM2X128:
2964    return DAG.getNode(Opc, dl, VT, V1, V2,
2965                       DAG.getConstant(TargetMask, MVT::i8));
2966  }
2967}
2968
2969static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2970                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
2971  switch(Opc) {
2972  default: llvm_unreachable("Unknown x86 shuffle node");
2973  case X86ISD::MOVLHPS:
2974  case X86ISD::MOVLHPD:
2975  case X86ISD::MOVHLPS:
2976  case X86ISD::MOVLPS:
2977  case X86ISD::MOVLPD:
2978  case X86ISD::MOVSS:
2979  case X86ISD::MOVSD:
2980  case X86ISD::UNPCKL:
2981  case X86ISD::UNPCKH:
2982    return DAG.getNode(Opc, dl, VT, V1, V2);
2983  }
2984}
2985
2986SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
2987  MachineFunction &MF = DAG.getMachineFunction();
2988  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2989  int ReturnAddrIndex = FuncInfo->getRAIndex();
2990
2991  if (ReturnAddrIndex == 0) {
2992    // Set up a frame object for the return address.
2993    uint64_t SlotSize = TD->getPointerSize();
2994    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
2995                                                           false);
2996    FuncInfo->setRAIndex(ReturnAddrIndex);
2997  }
2998
2999  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3000}
3001
3002
3003bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3004                                       bool hasSymbolicDisplacement) {
3005  // The offset should fit into a 32-bit immediate field.
3006  if (!isInt<32>(Offset))
3007    return false;
3008
3009  // If we don't have a symbolic displacement, there are no extra
3010  // restrictions.
3011  if (!hasSymbolicDisplacement)
3012    return true;
3013
3014  // FIXME: Some tweaks might be needed for medium code model.
3015  if (M != CodeModel::Small && M != CodeModel::Kernel)
3016    return false;
3017
3018  // For the small code model we assume that the last object is 16MB below
3019  // the end of the 31-bit address boundary. We may also accept pretty large
3020  // negative constants knowing that all objects are in the positive half of the address space.
3021  if (M == CodeModel::Small && Offset < 16*1024*1024)
3022    return true;
3023
3024  // For the kernel code model we know that all objects reside in the negative
3025  // half of the 32-bit address space. We must not accept negative offsets,
3026  // since they may be just off, but we may accept pretty large positive ones.
3027  if (M == CodeModel::Kernel && Offset > 0)
3028    return true;
3029
3030  return false;
3031}
3032
3033/// isCalleePop - Determines whether the callee is required to pop its
3034/// own arguments.
/// Callee pop is necessary to support tail calls.
3035bool X86::isCalleePop(CallingConv::ID CallingConv,
3036                      bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3037  if (IsVarArg)
3038    return false;
3039
3040  switch (CallingConv) {
3041  default:
3042    return false;
3043  case CallingConv::X86_StdCall:
3044    return !is64Bit;
3045  case CallingConv::X86_FastCall:
3046    return !is64Bit;
3047  case CallingConv::X86_ThisCall:
3048    return !is64Bit;
3049  case CallingConv::Fast:
3050    return TailCallOpt;
3051  case CallingConv::GHC:
3052    return TailCallOpt;
3053  }
3054}
3055
3056/// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the
3057/// X86-specific condition code, returning the condition code and the LHS/RHS
3058/// of the comparison to make.
3059static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3060                               SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3061  if (!isFP) {
3062    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3063      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3064        // X > -1   -> X == 0, jump !sign.
3065        RHS = DAG.getConstant(0, RHS.getValueType());
3066        return X86::COND_NS;
3067      }
3068      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3069        // X < 0   -> X == 0, jump on sign.
3070        return X86::COND_S;
3071      }
3072      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3073        // X < 1   -> X <= 0
3074        RHS = DAG.getConstant(0, RHS.getValueType());
3075        return X86::COND_LE;
3076      }
3077    }
3078
3079    switch (SetCCOpcode) {
3080    default: llvm_unreachable("Invalid integer condition!");
3081    case ISD::SETEQ:  return X86::COND_E;
3082    case ISD::SETGT:  return X86::COND_G;
3083    case ISD::SETGE:  return X86::COND_GE;
3084    case ISD::SETLT:  return X86::COND_L;
3085    case ISD::SETLE:  return X86::COND_LE;
3086    case ISD::SETNE:  return X86::COND_NE;
3087    case ISD::SETULT: return X86::COND_B;
3088    case ISD::SETUGT: return X86::COND_A;
3089    case ISD::SETULE: return X86::COND_BE;
3090    case ISD::SETUGE: return X86::COND_AE;
3091    }
3092  }
3093
3094  // First determine if it is required or is profitable to flip the operands.
3095
3096  // If LHS is a foldable load, but RHS is not, flip the condition.
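  // (Illustrative, assumed example: "setlt (load %p), %x" becomes
  // "setgt %x, (load %p)", so the load ends up on the RHS where the CMP
  // instruction can fold it from memory.)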
3097  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3098      !ISD::isNON_EXTLoad(RHS.getNode())) {
3099    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3100    std::swap(LHS, RHS);
3101  }
3102
3103  switch (SetCCOpcode) {
3104  default: break;
3105  case ISD::SETOLT:
3106  case ISD::SETOLE:
3107  case ISD::SETUGT:
3108  case ISD::SETUGE:
3109    std::swap(LHS, RHS);
3110    break;
3111  }
3112
3113  // On a floating point condition, the flags are set as follows:
3114  //  ZF  PF  CF   op
3115  //   0 | 0 | 0 | X > Y
3116  //   0 | 0 | 1 | X < Y
3117  //   1 | 0 | 0 | X == Y
3118  //   1 | 1 | 1 | unordered
3119  switch (SetCCOpcode) {
3120  default: llvm_unreachable("Condcode should be pre-legalized away");
3121  case ISD::SETUEQ:
3122  case ISD::SETEQ:   return X86::COND_E;
3123  case ISD::SETOLT:              // flipped
3124  case ISD::SETOGT:
3125  case ISD::SETGT:   return X86::COND_A;
3126  case ISD::SETOLE:              // flipped
3127  case ISD::SETOGE:
3128  case ISD::SETGE:   return X86::COND_AE;
3129  case ISD::SETUGT:              // flipped
3130  case ISD::SETULT:
3131  case ISD::SETLT:   return X86::COND_B;
3132  case ISD::SETUGE:              // flipped
3133  case ISD::SETULE:
3134  case ISD::SETLE:   return X86::COND_BE;
3135  case ISD::SETONE:
3136  case ISD::SETNE:   return X86::COND_NE;
3137  case ISD::SETUO:   return X86::COND_P;
3138  case ISD::SETO:    return X86::COND_NP;
3139  case ISD::SETOEQ:
3140  case ISD::SETUNE:  return X86::COND_INVALID;
3141  }
3142}
3143
3144/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
3145/// code? The current x86 ISA includes the following FP cmov instructions:
3146/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3147static bool hasFPCMov(unsigned X86CC) {
3148  switch (X86CC) {
3149  default:
3150    return false;
3151  case X86::COND_B:
3152  case X86::COND_BE:
3153  case X86::COND_E:
3154  case X86::COND_P:
3155  case X86::COND_A:
3156  case X86::COND_AE:
3157  case X86::COND_NE:
3158  case X86::COND_NP:
3159    return true;
3160  }
3161}
3162
3163/// isFPImmLegal - Returns true if the target can instruction select the
3164/// specified FP immediate natively. If false, the legalizer will
3165/// materialize the FP immediate as a load from a constant pool.
3166bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3167  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3168    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3169      return true;
3170  }
3171  return false;
3172}
3173
3174/// isUndefOrInRange - Return true if Val is undef or if its value falls within
3175/// the specified range [Low, Hi).
3176static bool isUndefOrInRange(int Val, int Low, int Hi) {
3177  return (Val < 0) || (Val >= Low && Val < Hi);
3178}
3179
3180/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3181/// specified value.
3182static bool isUndefOrEqual(int Val, int CmpVal) {
3183  if (Val < 0 || Val == CmpVal)
3184    return true;
3185  return false;
3186}
3187
3188/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3189/// at position Pos and ending at Pos+Size, falls within the specified
3190/// sequential range [Low, Low+Size) or is undef.
3191static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3192                                       unsigned Pos, unsigned Size, int Low) {
3193  for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3194    if (!isUndefOrEqual(Mask[i], Low))
3195      return false;
3196  return true;
3197}
3198
3199/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3200/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
3201/// the second operand.
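/// For example (illustrative), <2, 0, 1, 3> is a valid v4i32 PSHUFD mask,
/// while <4, 0, 1, 3> is not, since index 4 would select from the second
/// operand.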
3202static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3203 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3204 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3205 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3206 return (Mask[0] < 2 && Mask[1] < 2); 3207 return false; 3208} 3209 3210/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3211/// is suitable for input to PSHUFHW. 3212static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3213 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3214 return false; 3215 3216 // Lower quadword copied in order or undef. 3217 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3218 return false; 3219 3220 // Upper quadword shuffled. 3221 for (unsigned i = 4; i != 8; ++i) 3222 if (!isUndefOrInRange(Mask[i], 4, 8)) 3223 return false; 3224 3225 if (VT == MVT::v16i16) { 3226 // Lower quadword copied in order or undef. 3227 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3228 return false; 3229 3230 // Upper quadword shuffled. 3231 for (unsigned i = 12; i != 16; ++i) 3232 if (!isUndefOrInRange(Mask[i], 12, 16)) 3233 return false; 3234 } 3235 3236 return true; 3237} 3238 3239/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3240/// is suitable for input to PSHUFLW. 3241static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3242 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3243 return false; 3244 3245 // Upper quadword copied in order. 3246 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3247 return false; 3248 3249 // Lower quadword shuffled. 3250 for (unsigned i = 0; i != 4; ++i) 3251 if (!isUndefOrInRange(Mask[i], 0, 4)) 3252 return false; 3253 3254 if (VT == MVT::v16i16) { 3255 // Upper quadword copied in order. 3256 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3257 return false; 3258 3259 // Lower quadword shuffled. 3260 for (unsigned i = 8; i != 12; ++i) 3261 if (!isUndefOrInRange(Mask[i], 8, 12)) 3262 return false; 3263 } 3264 3265 return true; 3266} 3267 3268/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3269/// is suitable for input to PALIGNR. 3270static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3271 const X86Subtarget *Subtarget) { 3272 if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) || 3273 (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())) 3274 return false; 3275 3276 unsigned NumElts = VT.getVectorNumElements(); 3277 unsigned NumLanes = VT.getSizeInBits()/128; 3278 unsigned NumLaneElts = NumElts/NumLanes; 3279 3280 // Do not handle 64-bit element shuffles with palignr. 3281 if (NumLaneElts == 2) 3282 return false; 3283 3284 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3285 unsigned i; 3286 for (i = 0; i != NumLaneElts; ++i) { 3287 if (Mask[i+l] >= 0) 3288 break; 3289 } 3290 3291 // Lane is all undef, go to next lane 3292 if (i == NumLaneElts) 3293 continue; 3294 3295 int Start = Mask[i+l]; 3296 3297 // Make sure its in this lane in one of the sources 3298 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3299 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3300 return false; 3301 3302 // If not lane 0, then we must match lane 0 3303 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3304 return false; 3305 3306 // Correct second source to be contiguous with first source 3307 if (Start >= (int)NumElts) 3308 Start -= NumElts - NumLaneElts; 3309 3310 // Make sure we're shifting in the right direction. 
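    // (PALIGNR shifts the concatenated pair right, so the first source
    // element must come from a position strictly above its destination slot.)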
3311    if (Start <= (int)(i+l))
3312      return false;
3313
3314    Start -= i;
3315
3316    // Check the rest of the elements to see if they are consecutive.
3317    for (++i; i != NumLaneElts; ++i) {
3318      int Idx = Mask[i+l];
3319
3320      // Make sure it's in this lane.
3321      if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
3322          !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
3323        return false;
3324
3325      // If not lane 0, then we must match lane 0.
3326      if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
3327        return false;
3328
3329      if (Idx >= (int)NumElts)
3330        Idx -= NumElts - NumLaneElts;
3331
3332      if (!isUndefOrEqual(Idx, Start+i))
3333        return false;
3334
3335    }
3336  }
3337
3338  return true;
3339}
3340
3341/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
3342/// the two vector operands have swapped position.
3343static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
3344                                     unsigned NumElems) {
3345  for (unsigned i = 0; i != NumElems; ++i) {
3346    int idx = Mask[i];
3347    if (idx < 0)
3348      continue;
3349    else if (idx < (int)NumElems)
3350      Mask[i] = idx + NumElems;
3351    else
3352      Mask[i] = idx - NumElems;
3353  }
3354}
3355
3356/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
3357/// specifies a shuffle of elements that is suitable for input to 128/256-bit
3358/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
3359/// the reverse of what x86 shuffles want.
3360static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX,
3361                        bool Commuted = false) {
3362  if (!HasAVX && VT.getSizeInBits() == 256)
3363    return false;
3364
3365  unsigned NumElems = VT.getVectorNumElements();
3366  unsigned NumLanes = VT.getSizeInBits()/128;
3367  unsigned NumLaneElems = NumElems/NumLanes;
3368
3369  if (NumLaneElems != 2 && NumLaneElems != 4)
3370    return false;
3371
3372  // VSHUFPSY divides the resulting vector into 4 chunks.
3373  // The sources are also split into 4 chunks, and each destination
3374  // chunk must come from a different source chunk.
3375  //
3376  //  SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
3377  //  SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
3378  //
3379  //  DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
3380  //           Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
3381  //
3382  // VSHUFPDY divides the resulting vector into 4 chunks.
3383  // The sources are also split into 4 chunks, and each destination
3384  // chunk must come from a different source chunk.
3385  //
3386  //  SRC1 =>      X3       X2       X1       X0
3387  //  SRC2 =>      Y3       Y2       Y1       Y0
3388  //
3389  //  DST  =>  Y3..Y2,  X3..X2,  Y1..Y0,  X1..X0
3390  //
3391  unsigned HalfLaneElems = NumLaneElems/2;
3392  for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
3393    for (unsigned i = 0; i != NumLaneElems; ++i) {
3394      int Idx = Mask[i+l];
3395      unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
3396      if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
3397        return false;
3398      // For VSHUFPSY, the mask of the second half must be the same as the
3399      // first but with the appropriate offsets. This works in the same way as
3400      // VPERMILPS works with masks.
3401      if (NumElems != 8 || l == 0 || Mask[i] < 0)
3402        continue;
3403      if (!isUndefOrEqual(Idx, Mask[i]+l))
3404        return false;
3405    }
3406  }
3407
3408  return true;
3409}
3410
3411/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
3412/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
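/// For v4f32 this is the canonical mask <6, 7, 2, 3>: the high half of V2
/// followed by the high half of V1.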
3413static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3414 if (!VT.is128BitVector()) 3415 return false; 3416 3417 unsigned NumElems = VT.getVectorNumElements(); 3418 3419 if (NumElems != 4) 3420 return false; 3421 3422 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3423 return isUndefOrEqual(Mask[0], 6) && 3424 isUndefOrEqual(Mask[1], 7) && 3425 isUndefOrEqual(Mask[2], 2) && 3426 isUndefOrEqual(Mask[3], 3); 3427} 3428 3429/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3430/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3431/// <2, 3, 2, 3> 3432static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3433 if (!VT.is128BitVector()) 3434 return false; 3435 3436 unsigned NumElems = VT.getVectorNumElements(); 3437 3438 if (NumElems != 4) 3439 return false; 3440 3441 return isUndefOrEqual(Mask[0], 2) && 3442 isUndefOrEqual(Mask[1], 3) && 3443 isUndefOrEqual(Mask[2], 2) && 3444 isUndefOrEqual(Mask[3], 3); 3445} 3446 3447/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3448/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3449static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3450 if (!VT.is128BitVector()) 3451 return false; 3452 3453 unsigned NumElems = VT.getVectorNumElements(); 3454 3455 if (NumElems != 2 && NumElems != 4) 3456 return false; 3457 3458 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3459 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3460 return false; 3461 3462 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3463 if (!isUndefOrEqual(Mask[i], i)) 3464 return false; 3465 3466 return true; 3467} 3468 3469/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3470/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3471static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3472 if (!VT.is128BitVector()) 3473 return false; 3474 3475 unsigned NumElems = VT.getVectorNumElements(); 3476 3477 if (NumElems != 2 && NumElems != 4) 3478 return false; 3479 3480 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3481 if (!isUndefOrEqual(Mask[i], i)) 3482 return false; 3483 3484 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3485 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3486 return false; 3487 3488 return true; 3489} 3490 3491// 3492// Some special combinations that can be optimized. 3493// 3494static 3495SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3496 SelectionDAG &DAG) { 3497 EVT VT = SVOp->getValueType(0); 3498 DebugLoc dl = SVOp->getDebugLoc(); 3499 3500 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3501 return SDValue(); 3502 3503 ArrayRef<int> Mask = SVOp->getMask(); 3504 3505 // These are the special masks that may be optimized. 
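  // (The even mask picks the even-indexed elements of both sources, the odd
  // mask the odd-indexed ones; either is rewritten below as two in-lane
  // compactions followed by a single unpack.)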
3506 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3507 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3508 bool MatchEvenMask = true; 3509 bool MatchOddMask = true; 3510 for (int i=0; i<8; ++i) { 3511 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3512 MatchEvenMask = false; 3513 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3514 MatchOddMask = false; 3515 } 3516 static const int CompactionMaskEven[] = {0, 2, -1, -1, 4, 6, -1, -1}; 3517 static const int CompactionMaskOdd [] = {1, 3, -1, -1, 5, 7, -1, -1}; 3518 3519 const int *CompactionMask; 3520 if (MatchEvenMask) 3521 CompactionMask = CompactionMaskEven; 3522 else if (MatchOddMask) 3523 CompactionMask = CompactionMaskOdd; 3524 else 3525 return SDValue(); 3526 3527 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3528 3529 SDValue Op0 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(0), 3530 UndefNode, CompactionMask); 3531 SDValue Op1 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(1), 3532 UndefNode, CompactionMask); 3533 static const int UnpackMask[] = {0, 8, 1, 9, 4, 12, 5, 13}; 3534 return DAG.getVectorShuffle(VT, dl, Op0, Op1, UnpackMask); 3535} 3536 3537/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3538/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3539static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3540 bool HasAVX2, bool V2IsSplat = false) { 3541 unsigned NumElts = VT.getVectorNumElements(); 3542 3543 assert((VT.is128BitVector() || VT.is256BitVector()) && 3544 "Unsupported vector type for unpckh"); 3545 3546 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3547 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3548 return false; 3549 3550 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3551 // independently on 128-bit lanes. 3552 unsigned NumLanes = VT.getSizeInBits()/128; 3553 unsigned NumLaneElts = NumElts/NumLanes; 3554 3555 for (unsigned l = 0; l != NumLanes; ++l) { 3556 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3557 i != (l+1)*NumLaneElts; 3558 i += 2, ++j) { 3559 int BitI = Mask[i]; 3560 int BitI1 = Mask[i+1]; 3561 if (!isUndefOrEqual(BitI, j)) 3562 return false; 3563 if (V2IsSplat) { 3564 if (!isUndefOrEqual(BitI1, NumElts)) 3565 return false; 3566 } else { 3567 if (!isUndefOrEqual(BitI1, j + NumElts)) 3568 return false; 3569 } 3570 } 3571 } 3572 3573 return true; 3574} 3575 3576/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3577/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3578static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3579 bool HasAVX2, bool V2IsSplat = false) { 3580 unsigned NumElts = VT.getVectorNumElements(); 3581 3582 assert((VT.is128BitVector() || VT.is256BitVector()) && 3583 "Unsupported vector type for unpckh"); 3584 3585 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3586 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3587 return false; 3588 3589 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3590 // independently on 128-bit lanes. 
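  // (e.g. for v8i16 the expected UNPCKH mask is <4, 12, 5, 13, 6, 14, 7, 15>.)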
3591 unsigned NumLanes = VT.getSizeInBits()/128; 3592 unsigned NumLaneElts = NumElts/NumLanes; 3593 3594 for (unsigned l = 0; l != NumLanes; ++l) { 3595 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3596 i != (l+1)*NumLaneElts; i += 2, ++j) { 3597 int BitI = Mask[i]; 3598 int BitI1 = Mask[i+1]; 3599 if (!isUndefOrEqual(BitI, j)) 3600 return false; 3601 if (V2IsSplat) { 3602 if (isUndefOrEqual(BitI1, NumElts)) 3603 return false; 3604 } else { 3605 if (!isUndefOrEqual(BitI1, j+NumElts)) 3606 return false; 3607 } 3608 } 3609 } 3610 return true; 3611} 3612 3613/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3614/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3615/// <0, 0, 1, 1> 3616static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, 3617 bool HasAVX2) { 3618 unsigned NumElts = VT.getVectorNumElements(); 3619 3620 assert((VT.is128BitVector() || VT.is256BitVector()) && 3621 "Unsupported vector type for unpckh"); 3622 3623 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3624 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3625 return false; 3626 3627 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3628 // FIXME: Need a better way to get rid of this, there's no latency difference 3629 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3630 // the former later. We should also remove the "_undef" special mask. 3631 if (NumElts == 4 && VT.getSizeInBits() == 256) 3632 return false; 3633 3634 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3635 // independently on 128-bit lanes. 3636 unsigned NumLanes = VT.getSizeInBits()/128; 3637 unsigned NumLaneElts = NumElts/NumLanes; 3638 3639 for (unsigned l = 0; l != NumLanes; ++l) { 3640 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3641 i != (l+1)*NumLaneElts; 3642 i += 2, ++j) { 3643 int BitI = Mask[i]; 3644 int BitI1 = Mask[i+1]; 3645 3646 if (!isUndefOrEqual(BitI, j)) 3647 return false; 3648 if (!isUndefOrEqual(BitI1, j)) 3649 return false; 3650 } 3651 } 3652 3653 return true; 3654} 3655 3656/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3657/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3658/// <2, 2, 3, 3> 3659static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3660 unsigned NumElts = VT.getVectorNumElements(); 3661 3662 assert((VT.is128BitVector() || VT.is256BitVector()) && 3663 "Unsupported vector type for unpckh"); 3664 3665 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3666 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3667 return false; 3668 3669 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3670 // independently on 128-bit lanes. 3671 unsigned NumLanes = VT.getSizeInBits()/128; 3672 unsigned NumLaneElts = NumElts/NumLanes; 3673 3674 for (unsigned l = 0; l != NumLanes; ++l) { 3675 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3676 i != (l+1)*NumLaneElts; i += 2, ++j) { 3677 int BitI = Mask[i]; 3678 int BitI1 = Mask[i+1]; 3679 if (!isUndefOrEqual(BitI, j)) 3680 return false; 3681 if (!isUndefOrEqual(BitI1, j)) 3682 return false; 3683 } 3684 } 3685 return true; 3686} 3687 3688/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3689/// specifies a shuffle of elements that is suitable for input to MOVSS, 3690/// MOVSD, and MOVD, i.e. setting the lowest element. 
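/// For v4i32 that is the mask <4, 1, 2, 3>: element 0 comes from V2 and the
/// remaining elements pass through from V1.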
3691static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
3692  if (VT.getVectorElementType().getSizeInBits() < 32)
3693    return false;
3694  if (!VT.is128BitVector())
3695    return false;
3696
3697  unsigned NumElts = VT.getVectorNumElements();
3698
3699  if (!isUndefOrEqual(Mask[0], NumElts))
3700    return false;
3701
3702  for (unsigned i = 1; i != NumElts; ++i)
3703    if (!isUndefOrEqual(Mask[i], i))
3704      return false;
3705
3706  return true;
3707}
3708
3709/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
3710/// as permutations between 128-bit chunks or halves. As an example, in the
3711/// shuffle below:
3712///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
3713/// the first half comes from the second half of V1 and the second half from
3714/// the second half of V2.
3715static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
3716  if (!HasAVX || !VT.is256BitVector())
3717    return false;
3718
3719  // The shuffle result is divided into half A and half B. In total the two
3720  // sources have 4 halves, namely: C, D, E, F. The final values of A and
3721  // B must come from C, D, E or F.
3722  unsigned HalfSize = VT.getVectorNumElements()/2;
3723  bool MatchA = false, MatchB = false;
3724
3725  // Check if A comes from one of C, D, E, F.
3726  for (unsigned Half = 0; Half != 4; ++Half) {
3727    if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
3728      MatchA = true;
3729      break;
3730    }
3731  }
3732
3733  // Check if B comes from one of C, D, E, F.
3734  for (unsigned Half = 0; Half != 4; ++Half) {
3735    if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
3736      MatchB = true;
3737      break;
3738    }
3739  }
3740
3741  return MatchA && MatchB;
3742}
3743
3744/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
3745/// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
3746static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
3747  EVT VT = SVOp->getValueType(0);
3748
3749  unsigned HalfSize = VT.getVectorNumElements()/2;
3750
3751  unsigned FstHalf = 0, SndHalf = 0;
3752  for (unsigned i = 0; i < HalfSize; ++i) {
3753    if (SVOp->getMaskElt(i) > 0) {
3754      FstHalf = SVOp->getMaskElt(i)/HalfSize;
3755      break;
3756    }
3757  }
3758  for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
3759    if (SVOp->getMaskElt(i) > 0) {
3760      SndHalf = SVOp->getMaskElt(i)/HalfSize;
3761      break;
3762    }
3763  }
3764
3765  return (FstHalf | (SndHalf << 4));
3766}
3767
3768/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
3769/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
3770/// Note that VPERMIL mask matching differs depending on whether the underlying
3771/// type is 32- or 64-bit. For VPERMILPS the high half of the mask should point
3772/// to the same elements as the low half, but in the higher half of the source.
3773/// For VPERMILPD the two lanes can be shuffled independently of each other,
3774/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
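/// For example (illustrative), <1, 0, 3, 2, 5, 4, 7, 6> is a valid v8f32
/// VPERMILPS mask: the second lane repeats the first lane's pattern at an
/// offset of 4.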
3775static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
3776  if (!HasAVX)
3777    return false;
3778
3779  unsigned NumElts = VT.getVectorNumElements();
3780  // Only match 256-bit with 32/64-bit types.
3781  if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8))
3782    return false;
3783
3784  unsigned NumLanes = VT.getSizeInBits()/128;
3785  unsigned LaneSize = NumElts/NumLanes;
3786  for (unsigned l = 0; l != NumElts; l += LaneSize) {
3787    for (unsigned i = 0; i != LaneSize; ++i) {
3788      if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
3789        return false;
3790      if (NumElts != 8 || l == 0)
3791        continue;
3792      // VPERMILPS handling
3793      if (Mask[i] < 0)
3794        continue;
3795      if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
3796        return false;
3797    }
3798  }
3799
3800  return true;
3801}
3802
3803/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
3804/// what x86 movss wants: the lowest element must be the lowest element of
3805/// vector 2, with the remaining elements coming from vector 1 in order.
3806static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
3807                               bool V2IsSplat = false, bool V2IsUndef = false) {
3808  if (!VT.is128BitVector())
3809    return false;
3810
3811  unsigned NumOps = VT.getVectorNumElements();
3812  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
3813    return false;
3814
3815  if (!isUndefOrEqual(Mask[0], 0))
3816    return false;
3817
3818  for (unsigned i = 1; i != NumOps; ++i)
3819    if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
3820          (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
3821          (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
3822      return false;
3823
3824  return true;
3825}
3826
3827/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3828/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
3829/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
3830static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
3831                           const X86Subtarget *Subtarget) {
3832  if (!Subtarget->hasSSE3())
3833    return false;
3834
3835  unsigned NumElems = VT.getVectorNumElements();
3836
3837  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
3838      (VT.getSizeInBits() == 256 && NumElems != 8))
3839    return false;
3840
3841  // "i+1" is the value the indexed mask element must have.
3842  for (unsigned i = 0; i != NumElems; i += 2)
3843    if (!isUndefOrEqual(Mask[i], i+1) ||
3844        !isUndefOrEqual(Mask[i+1], i+1))
3845      return false;
3846
3847  return true;
3848}
3849
3850/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3851/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
3852/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
3853static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
3854                           const X86Subtarget *Subtarget) {
3855  if (!Subtarget->hasSSE3())
3856    return false;
3857
3858  unsigned NumElems = VT.getVectorNumElements();
3859
3860  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
3861      (VT.getSizeInBits() == 256 && NumElems != 8))
3862    return false;
3863
3864  // "i" is the value the indexed mask element must have.
3865  for (unsigned i = 0; i != NumElems; i += 2)
3866    if (!isUndefOrEqual(Mask[i], i) ||
3867        !isUndefOrEqual(Mask[i+1], i))
3868      return false;
3869
3870  return true;
3871}
3872
3873/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
3874/// specifies a shuffle of elements that is suitable for input to the 256-bit
3875/// version of MOVDDUP.
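/// i.e. the mask <0, 0, 2, 2> for v4f64: each 128-bit half duplicates its
/// low element.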
3876static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3877 if (!HasAVX || !VT.is256BitVector()) 3878 return false; 3879 3880 unsigned NumElts = VT.getVectorNumElements(); 3881 if (NumElts != 4) 3882 return false; 3883 3884 for (unsigned i = 0; i != NumElts/2; ++i) 3885 if (!isUndefOrEqual(Mask[i], 0)) 3886 return false; 3887 for (unsigned i = NumElts/2; i != NumElts; ++i) 3888 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3889 return false; 3890 return true; 3891} 3892 3893/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3894/// specifies a shuffle of elements that is suitable for input to 128-bit 3895/// version of MOVDDUP. 3896static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 3897 if (!VT.is128BitVector()) 3898 return false; 3899 3900 unsigned e = VT.getVectorNumElements() / 2; 3901 for (unsigned i = 0; i != e; ++i) 3902 if (!isUndefOrEqual(Mask[i], i)) 3903 return false; 3904 for (unsigned i = 0; i != e; ++i) 3905 if (!isUndefOrEqual(Mask[e+i], i)) 3906 return false; 3907 return true; 3908} 3909 3910/// isVEXTRACTF128Index - Return true if the specified 3911/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3912/// suitable for input to VEXTRACTF128. 3913bool X86::isVEXTRACTF128Index(SDNode *N) { 3914 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3915 return false; 3916 3917 // The index should be aligned on a 128-bit boundary. 3918 uint64_t Index = 3919 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3920 3921 unsigned VL = N->getValueType(0).getVectorNumElements(); 3922 unsigned VBits = N->getValueType(0).getSizeInBits(); 3923 unsigned ElSize = VBits / VL; 3924 bool Result = (Index * ElSize) % 128 == 0; 3925 3926 return Result; 3927} 3928 3929/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3930/// operand specifies a subvector insert that is suitable for input to 3931/// VINSERTF128. 3932bool X86::isVINSERTF128Index(SDNode *N) { 3933 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3934 return false; 3935 3936 // The index should be aligned on a 128-bit boundary. 3937 uint64_t Index = 3938 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3939 3940 unsigned VL = N->getValueType(0).getVectorNumElements(); 3941 unsigned VBits = N->getValueType(0).getSizeInBits(); 3942 unsigned ElSize = VBits / VL; 3943 bool Result = (Index * ElSize) % 128 == 0; 3944 3945 return Result; 3946} 3947 3948/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3949/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3950/// Handles 128-bit and 256-bit. 3951static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 3952 EVT VT = N->getValueType(0); 3953 3954 assert((VT.is128BitVector() || VT.is256BitVector()) && 3955 "Unsupported vector type for PSHUF/SHUFP"); 3956 3957 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 3958 // independently on 128-bit lanes. 3959 unsigned NumElts = VT.getVectorNumElements(); 3960 unsigned NumLanes = VT.getSizeInBits()/128; 3961 unsigned NumLaneElts = NumElts/NumLanes; 3962 3963 assert((NumLaneElts == 2 || NumLaneElts == 4) && 3964 "Only supports 2 or 4 elements per lane"); 3965 3966 unsigned Shift = (NumLaneElts == 4) ? 
1 : 0; 3967 unsigned Mask = 0; 3968 for (unsigned i = 0; i != NumElts; ++i) { 3969 int Elt = N->getMaskElt(i); 3970 if (Elt < 0) continue; 3971 Elt &= NumLaneElts - 1; 3972 unsigned ShAmt = (i << Shift) % 8; 3973 Mask |= Elt << ShAmt; 3974 } 3975 3976 return Mask; 3977} 3978 3979/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3980/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 3981static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 3982 EVT VT = N->getValueType(0); 3983 3984 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 3985 "Unsupported vector type for PSHUFHW"); 3986 3987 unsigned NumElts = VT.getVectorNumElements(); 3988 3989 unsigned Mask = 0; 3990 for (unsigned l = 0; l != NumElts; l += 8) { 3991 // 8 nodes per lane, but we only care about the last 4. 3992 for (unsigned i = 0; i < 4; ++i) { 3993 int Elt = N->getMaskElt(l+i+4); 3994 if (Elt < 0) continue; 3995 Elt &= 0x3; // only 2-bits. 3996 Mask |= Elt << (i * 2); 3997 } 3998 } 3999 4000 return Mask; 4001} 4002 4003/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4004/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4005static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4006 EVT VT = N->getValueType(0); 4007 4008 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4009 "Unsupported vector type for PSHUFHW"); 4010 4011 unsigned NumElts = VT.getVectorNumElements(); 4012 4013 unsigned Mask = 0; 4014 for (unsigned l = 0; l != NumElts; l += 8) { 4015 // 8 nodes per lane, but we only care about the first 4. 4016 for (unsigned i = 0; i < 4; ++i) { 4017 int Elt = N->getMaskElt(l+i); 4018 if (Elt < 0) continue; 4019 Elt &= 0x3; // only 2-bits 4020 Mask |= Elt << (i * 2); 4021 } 4022 } 4023 4024 return Mask; 4025} 4026 4027/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4028/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4029static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4030 EVT VT = SVOp->getValueType(0); 4031 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4032 4033 unsigned NumElts = VT.getVectorNumElements(); 4034 unsigned NumLanes = VT.getSizeInBits()/128; 4035 unsigned NumLaneElts = NumElts/NumLanes; 4036 4037 int Val = 0; 4038 unsigned i; 4039 for (i = 0; i != NumElts; ++i) { 4040 Val = SVOp->getMaskElt(i); 4041 if (Val >= 0) 4042 break; 4043 } 4044 if (Val >= (int)NumElts) 4045 Val -= NumElts - NumLaneElts; 4046 4047 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4048 return (Val - i) * EltSize; 4049} 4050 4051/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4052/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4053/// instructions. 4054unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4055 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4056 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4057 4058 uint64_t Index = 4059 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4060 4061 EVT VecVT = N->getOperand(0).getValueType(); 4062 EVT ElVT = VecVT.getVectorElementType(); 4063 4064 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4065 return Index / NumElemsPerChunk; 4066} 4067 4068/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4069/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4070/// instructions. 
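/// For example (illustrative), inserting a v4i32 subvector at element index 4
/// of a v8i32 yields the immediate 1.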
4071unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4072 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4073 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4074 4075 uint64_t Index = 4076 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4077 4078 EVT VecVT = N->getValueType(0); 4079 EVT ElVT = VecVT.getVectorElementType(); 4080 4081 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4082 return Index / NumElemsPerChunk; 4083} 4084 4085/// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4086/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4087/// Handles 256-bit. 4088static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4089 EVT VT = N->getValueType(0); 4090 4091 unsigned NumElts = VT.getVectorNumElements(); 4092 4093 assert((VT.is256BitVector() && NumElts == 4) && 4094 "Unsupported vector type for VPERMQ/VPERMPD"); 4095 4096 unsigned Mask = 0; 4097 for (unsigned i = 0; i != NumElts; ++i) { 4098 int Elt = N->getMaskElt(i); 4099 if (Elt < 0) 4100 continue; 4101 Mask |= Elt << (i*2); 4102 } 4103 4104 return Mask; 4105} 4106/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4107/// constant +0.0. 4108bool X86::isZeroNode(SDValue Elt) { 4109 return ((isa<ConstantSDNode>(Elt) && 4110 cast<ConstantSDNode>(Elt)->isNullValue()) || 4111 (isa<ConstantFPSDNode>(Elt) && 4112 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4113} 4114 4115/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4116/// their permute mask. 4117static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4118 SelectionDAG &DAG) { 4119 EVT VT = SVOp->getValueType(0); 4120 unsigned NumElems = VT.getVectorNumElements(); 4121 SmallVector<int, 8> MaskVec; 4122 4123 for (unsigned i = 0; i != NumElems; ++i) { 4124 int Idx = SVOp->getMaskElt(i); 4125 if (Idx >= 0) { 4126 if (Idx < (int)NumElems) 4127 Idx += NumElems; 4128 else 4129 Idx -= NumElems; 4130 } 4131 MaskVec.push_back(Idx); 4132 } 4133 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4134 SVOp->getOperand(0), &MaskVec[0]); 4135} 4136 4137/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4138/// match movhlps. The lower half elements should come from upper half of 4139/// V1 (and in order), and the upper half elements should come from the upper 4140/// half of V2 (and in order). 4141static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4142 if (!VT.is128BitVector()) 4143 return false; 4144 if (VT.getVectorNumElements() != 4) 4145 return false; 4146 for (unsigned i = 0, e = 2; i != e; ++i) 4147 if (!isUndefOrEqual(Mask[i], i+2)) 4148 return false; 4149 for (unsigned i = 2; i != 4; ++i) 4150 if (!isUndefOrEqual(Mask[i], i+4)) 4151 return false; 4152 return true; 4153} 4154 4155/// isScalarLoadToVector - Returns true if the node is a scalar load that 4156/// is promoted to a vector. It also returns the LoadSDNode by reference if 4157/// required. 4158static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4159 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4160 return false; 4161 N = N->getOperand(0).getNode(); 4162 if (!ISD::isNON_EXTLoad(N)) 4163 return false; 4164 if (LD) 4165 *LD = cast<LoadSDNode>(N); 4166 return true; 4167} 4168 4169// Test whether the given value is a vector value which will be legalized 4170// into a load. 
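// (That is, a BUILD_VECTOR of constants that is neither all-zeros nor
// all-ones; those two are materialized with special instructions instead.)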
4171static bool WillBeConstantPoolLoad(SDNode *N) {
4172  if (N->getOpcode() != ISD::BUILD_VECTOR)
4173    return false;
4174
4175  // Check for any non-constant elements.
4176  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
4177    switch (N->getOperand(i).getNode()->getOpcode()) {
4178    case ISD::UNDEF:
4179    case ISD::ConstantFP:
4180    case ISD::Constant:
4181      break;
4182    default:
4183      return false;
4184    }
4185
4186  // Vectors of all-zeros and all-ones are materialized with special
4187  // instructions rather than being loaded.
4188  return !ISD::isBuildVectorAllZeros(N) &&
4189         !ISD::isBuildVectorAllOnes(N);
4190}
4191
4192/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
4193/// match movlp{s|d}. The lower half elements should come from the lower half
4194/// of V1 (and in order), and the upper half elements should come from the
4195/// upper half of V2 (and in order). Since V1 will become the source of the
4196/// MOVLP, it must be either a vector load or a scalar load to vector.
4197static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
4198                               ArrayRef<int> Mask, EVT VT) {
4199  if (!VT.is128BitVector())
4200    return false;
4201
4202  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
4203    return false;
4204  // If V2 is a vector load, don't do this transformation. We will try to use
4205  // a load-folding shufps op instead.
4206  if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
4207    return false;
4208
4209  unsigned NumElems = VT.getVectorNumElements();
4210
4211  if (NumElems != 2 && NumElems != 4)
4212    return false;
4213  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4214    if (!isUndefOrEqual(Mask[i], i))
4215      return false;
4216  for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4217    if (!isUndefOrEqual(Mask[i], i+NumElems))
4218      return false;
4219  return true;
4220}
4221
4222/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
4223/// all the same.
4224static bool isSplatVector(SDNode *N) {
4225  if (N->getOpcode() != ISD::BUILD_VECTOR)
4226    return false;
4227
4228  SDValue SplatValue = N->getOperand(0);
4229  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
4230    if (N->getOperand(i) != SplatValue)
4231      return false;
4232  return true;
4233}
4234
4235/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
4236/// to a zero vector.
4237/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
4238static bool isZeroShuffle(ShuffleVectorSDNode *N) {
4239  SDValue V1 = N->getOperand(0);
4240  SDValue V2 = N->getOperand(1);
4241  unsigned NumElems = N->getValueType(0).getVectorNumElements();
4242  for (unsigned i = 0; i != NumElems; ++i) {
4243    int Idx = N->getMaskElt(i);
4244    if (Idx >= (int)NumElems) {
4245      unsigned Opc = V2.getOpcode();
4246      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
4247        continue;
4248      if (Opc != ISD::BUILD_VECTOR ||
4249          !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
4250        return false;
4251    } else if (Idx >= 0) {
4252      unsigned Opc = V1.getOpcode();
4253      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
4254        continue;
4255      if (Opc != ISD::BUILD_VECTOR ||
4256          !X86::isZeroNode(V1.getOperand(Idx)))
4257        return false;
4258    }
4259  }
4260  return true;
4261}
4262
4263/// getZeroVector - Returns a vector of specified type with all zero elements.
4264///
4265static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
4266                             SelectionDAG &DAG, DebugLoc dl) {
4267  assert(VT.isVector() && "Expected a vector type");
4268  unsigned Size = VT.getSizeInBits();
4269
4270  // Always build SSE zero vectors as <4 x i32> bitcasted
4271  // to their dest type. This ensures they get CSE'd.
4272  SDValue Vec;
4273  if (Size == 128) {  // SSE
4274    if (Subtarget->hasSSE2()) {  // SSE2
4275      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
4276      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4277    } else { // SSE1
4278      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
4279      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
4280    }
4281  } else if (Size == 256) { // AVX
4282    if (Subtarget->hasAVX2()) { // AVX2
4283      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
4284      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4285      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
4286    } else {
4287      // 256-bit logic and arithmetic instructions in AVX are all
4288      // floating-point, no support for integer ops. Emit fp zeroed vectors.
4289      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
4290      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4291      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
4292    }
4293  } else
4294    llvm_unreachable("Unexpected vector type");
4295
4296  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4297}
4298
4299/// getOnesVector - Returns a vector of specified type with all bits set.
4300/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
4301/// no AVX2 support, use two <4 x i32> vectors inserted into an <8 x i32>
4302/// appropriately. Then bitcast to their original type, ensuring they get CSE'd.
4303static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG,
4304                             DebugLoc dl) {
4305  assert(VT.isVector() && "Expected a vector type");
4306  unsigned Size = VT.getSizeInBits();
4307
4308  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
4309  SDValue Vec;
4310  if (Size == 256) {
4311    if (HasAVX2) { // AVX2
4312      SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4313      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
4314    } else { // AVX
4315      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4316      Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
4317    }
4318  } else if (Size == 128) {
4319    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4320  } else
4321    llvm_unreachable("Unexpected vector type");
4322
4323  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4324}
4325
4326/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
4327/// that point to V2 point to its first element.
4328static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
4329  for (unsigned i = 0; i != NumElems; ++i) {
4330    if (Mask[i] > (int)NumElems) {
4331      Mask[i] = NumElems;
4332    }
4333  }
4334}
4335
4336/// getMOVL - Returns a vector_shuffle mask for a movs{s|d}, movd
4337/// operation of the specified width.
4338static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
4339                       SDValue V2) {
4340  unsigned NumElems = VT.getVectorNumElements();
4341  SmallVector<int, 8> Mask;
4342  Mask.push_back(NumElems);
4343  for (unsigned i = 1; i != NumElems; ++i)
4344    Mask.push_back(i);
4345  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4346}
4347
4348/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
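/// For v4i32 the resulting mask is <0, 4, 1, 5>.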
4349static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
4350                          SDValue V2) {
4351  unsigned NumElems = VT.getVectorNumElements();
4352  SmallVector<int, 8> Mask;
4353  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
4354    Mask.push_back(i);
4355    Mask.push_back(i + NumElems);
4356  }
4357  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4358}
4359
4360/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
4361static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
4362                          SDValue V2) {
4363  unsigned NumElems = VT.getVectorNumElements();
4364  SmallVector<int, 8> Mask;
4365  for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
4366    Mask.push_back(i + Half);
4367    Mask.push_back(i + NumElems + Half);
4368  }
4369  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4370}
4371
4372// PromoteSplati8i16 - i16 and i8 vector types can't be used directly by a
4373// generic shuffle instruction because the target has no such instructions.
4374// Generate shuffles which repeat the i16 and i8 elements several times until
4375// they can be represented by v4f32 and manipulated by target-supported shuffles.
4376static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
4377  EVT VT = V.getValueType();
4378  int NumElems = VT.getVectorNumElements();
4379  DebugLoc dl = V.getDebugLoc();
4380
4381  while (NumElems > 4) {
4382    if (EltNo < NumElems/2) {
4383      V = getUnpackl(DAG, dl, VT, V, V);
4384    } else {
4385      V = getUnpackh(DAG, dl, VT, V, V);
4386      EltNo -= NumElems/2;
4387    }
4388    NumElems >>= 1;
4389  }
4390  return V;
4391}
4392
4393/// getLegalSplat - Generate a legal splat with supported x86 shuffles.
4394static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
4395  EVT VT = V.getValueType();
4396  DebugLoc dl = V.getDebugLoc();
4397  unsigned Size = VT.getSizeInBits();
4398
4399  if (Size == 128) {
4400    V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
4401    int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
4402    V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
4403                             &SplatMask[0]);
4404  } else if (Size == 256) {
4405    // To use VPERMILPS to splat scalars, the second half of indices must
4406    // refer to the higher part, which is a duplication of the lower one,
4407    // because VPERMILPS can only handle in-lane permutations.
4408    int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
4409                         EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
4410
4411    V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
4412    V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
4413                             &SplatMask[0]);
4414  } else
4415    llvm_unreachable("Vector size not supported");
4416
4417  return DAG.getNode(ISD::BITCAST, dl, VT, V);
4418}
4419
4420/// PromoteSplat - Splat is promoted to target supported vector shuffles.
4421static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
4422  EVT SrcVT = SV->getValueType(0);
4423  SDValue V1 = SV->getOperand(0);
4424  DebugLoc dl = SV->getDebugLoc();
4425
4426  int EltNo = SV->getSplatIndex();
4427  int NumElems = SrcVT.getVectorNumElements();
4428  unsigned Size = SrcVT.getSizeInBits();
4429
4430  assert(((Size == 128 && NumElems > 4) || Size == 256) &&
4431         "Unknown how to promote splat for type");
4432
4433  // Extract the 128-bit part containing the splat element and update
4434  // the splat element index when it refers to the higher register.
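  // (e.g. when splatting element 6 of a v8f32, the upper 128-bit half is
  // extracted and EltNo becomes 2 within it.)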
  if (Size == 256) {
    V1 = Extract128BitVector(V1, EltNo, DAG, dl);
    if (EltNo >= NumElems/2)
      EltNo -= NumElems/2;
  }

  // All i16 and i8 vector types can't be used directly by a generic shuffle
  // instruction because the target has no such instruction. Generate shuffles
  // which repeat i16 and i8 several times until they fit in i32, and then can
  // be manipulated by target supported shuffles.
  EVT EltVT = SrcVT.getVectorElementType();
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    V1 = PromoteSplati8i16(V1, DAG, EltNo);

  // Recreate the 256-bit vector and place the same 128-bit vector
  // into the low and high part. This is necessary because we want
  // to use VPERM* to shuffle the vectors.
  if (Size == 256) {
    V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
  }

  return getLegalSplat(DAG, V1, EltNo);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector against a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                           bool IsZero,
                                           const X86Subtarget *Subtarget,
                                           SelectionDAG &DAG) {
  EVT VT = V2.getValueType();
  SDValue V1 = IsZero
    ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec.push_back(i == Idx ? NumElems : i);
  return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}

/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
/// target specific opcode. Returns true if the Mask could be calculated.
/// Sets IsUnary to true if it only uses one source.
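/// Worked example (illustrative, assuming the usual SHUFPS immediate
/// encoding): an X86ISD::SHUFP node over v4f32 with immediate 0x1b decodes
/// to the mask <3, 2, 5, 4>: two elements picked from the first source
/// followed by two picked from the second.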
static bool getTargetShuffleMask(SDNode *N, MVT VT,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
  unsigned NumElems = VT.getVectorNumElements();
  SDValue ImmN;

  IsUnary = false;
  switch(N->getOpcode()) {
  case X86ISD::SHUFP:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    break;
  case X86ISD::UNPCKH:
    DecodeUNPCKHMask(VT, Mask);
    break;
  case X86ISD::UNPCKL:
    DecodeUNPCKLMask(VT, Mask);
    break;
  case X86ISD::MOVHLPS:
    DecodeMOVHLPSMask(NumElems, Mask);
    break;
  case X86ISD::MOVLHPS:
    DecodeMOVLHPSMask(NumElems, Mask);
    break;
  case X86ISD::PSHUFD:
  case X86ISD::VPERMILP:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFHW:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFLW:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::VPERMI:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSS:
  case X86ISD::MOVSD: {
    // The index 0 always comes from the first element of the second source;
    // this is why MOVSS and MOVSD are used in the first place. The other
    // elements come from the other positions of the first source vector.
    Mask.push_back(NumElems);
    for (unsigned i = 1; i != NumElems; ++i) {
      Mask.push_back(i);
    }
    break;
  }
  case X86ISD::VPERM2X128:
    ImmN = N->getOperand(N->getNumOperands()-1);
    DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    if (Mask.empty()) return false;
    break;
  case X86ISD::MOVDDUP:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVLPD:
  case X86ISD::MOVLPS:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::PALIGN:
    // Not yet implemented
    return false;
  default: llvm_unreachable("unknown target shuffle node");
  }

  return true;
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
                                   unsigned Depth) {
  if (Depth == 6)
    return SDValue();  // Limit search depth.

  SDValue V = SDValue(N, 0);
  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
    int Elt = SV->getMaskElt(Index);

    if (Elt < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    unsigned NumElems = VT.getVectorNumElements();
    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
                                         : SV->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
  }

  // Recurse into target specific vector shuffles to find scalars.
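  // Illustrative sketch of the recursion below: for something like
  //   t1 = X86ISD::PSHUFD t0, <imm>
  // we decode <imm> into a mask, find which element of t0 supplies the
  // requested Index, and recurse into t0 with that remapped index.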
  if (isTargetShuffle(Opcode)) {
    MVT ShufVT = V.getValueType().getSimpleVT();
    unsigned NumElems = ShufVT.getVectorNumElements();
    SmallVector<int, 16> ShuffleMask;
    SDValue ImmN;
    bool IsUnary;

    if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
      return SDValue();

    int Elt = ShuffleMask[Index];
    if (Elt < 0)
      return DAG.getUNDEF(ShufVT.getVectorElementType());

    SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
                                         : N->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
                               Depth+1);
  }

  // Actual nodes that may contain scalar elements
  if (Opcode == ISD::BITCAST) {
    V = V.getOperand(0);
    EVT SrcVT = V.getValueType();
    unsigned NumElems = VT.getVectorNumElements();

    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
      return SDValue();
  }

  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return (Index == 0) ? V.getOperand(0)
                        : DAG.getUNDEF(VT.getVectorElementType());

  if (V.getOpcode() == ISD::BUILD_VECTOR)
    return V.getOperand(Index);

  return SDValue();
}

/// getNumOfConsecutiveZeros - Return the number of elements of a vector
/// shuffle operation which come consecutively from a zero. The
/// search can start in two different directions, from left or right.
static
unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
                                  bool ZerosFromLeft, SelectionDAG &DAG) {
  unsigned i;
  for (i = 0; i != NumElems; ++i) {
    unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
    SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
    if (!(Elt.getNode() &&
         (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
      break;
  }

  return i;
}

/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
/// correspond consecutively to elements from one of the vector operands,
/// starting from its index OpIdx. Also tell OpNum which source vector operand.
static
bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
                              unsigned MaskI, unsigned MaskE, unsigned OpIdx,
                              unsigned NumElems, unsigned &OpNum) {
  bool SeenV1 = false;
  bool SeenV2 = false;

  for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
    int Idx = SVOp->getMaskElt(i);
    // Ignore undef indices
    if (Idx < 0)
      continue;

    if (Idx < (int)NumElems)
      SeenV1 = true;
    else
      SeenV2 = true;

    // Only accept consecutive elements from the same vector
    if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
      return false;
  }

  OpNum = SeenV1 ? 0 : 1;
  return true;
}

/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
/// logical right shift of a vector.
static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                               bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
              false /* check zeros from right */, DAG);
  unsigned OpSrc;

  if (!NumZeros)
    return false;

  // Considering the elements in the mask that are not consecutive zeros,
  // check if they consecutively come from only one of the source vectors.
4683 // 4684 // V1 = {X, A, B, C} 0 4685 // \ \ \ / 4686 // vector_shuffle V1, V2 <1, 2, 3, X> 4687 // 4688 if (!isShuffleMaskConsecutive(SVOp, 4689 0, // Mask Start Index 4690 NumElems-NumZeros, // Mask End Index(exclusive) 4691 NumZeros, // Where to start looking in the src vector 4692 NumElems, // Number of elements in vector 4693 OpSrc)) // Which source operand ? 4694 return false; 4695 4696 isLeft = false; 4697 ShAmt = NumZeros; 4698 ShVal = SVOp->getOperand(OpSrc); 4699 return true; 4700} 4701 4702/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4703/// logical left shift of a vector. 4704static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4705 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4706 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4707 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4708 true /* check zeros from left */, DAG); 4709 unsigned OpSrc; 4710 4711 if (!NumZeros) 4712 return false; 4713 4714 // Considering the elements in the mask that are not consecutive zeros, 4715 // check if they consecutively come from only one of the source vectors. 4716 // 4717 // 0 { A, B, X, X } = V2 4718 // / \ / / 4719 // vector_shuffle V1, V2 <X, X, 4, 5> 4720 // 4721 if (!isShuffleMaskConsecutive(SVOp, 4722 NumZeros, // Mask Start Index 4723 NumElems, // Mask End Index(exclusive) 4724 0, // Where to start looking in the src vector 4725 NumElems, // Number of elements in vector 4726 OpSrc)) // Which source operand ? 4727 return false; 4728 4729 isLeft = true; 4730 ShAmt = NumZeros; 4731 ShVal = SVOp->getOperand(OpSrc); 4732 return true; 4733} 4734 4735/// isVectorShift - Returns true if the shuffle can be implemented as a 4736/// logical left or right shift of a vector. 4737static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4738 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4739 // Although the logic below support any bitwidth size, there are no 4740 // shift instructions which handle more than 128-bit vectors. 4741 if (!SVOp->getValueType(0).is128BitVector()) 4742 return false; 4743 4744 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4745 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4746 return true; 4747 4748 return false; 4749} 4750 4751/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget* Subtarget,
                                     const TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
      else
        V = DAG.getUNDEF(MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDValue ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
                              MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.getNode())
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
                        DAG.getIntPtrConstant(i/2));
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget* Subtarget,
                                     const TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
        else
          V = DAG.getUNDEF(MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
                      MVT::v8i16, V, Op.getOperand(i),
                      DAG.getIntPtrConstant(i));
    }
  }

  return V;
}

/// getVShift - Return a vector logical shift node.
///
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
                         unsigned NumBits, SelectionDAG &DAG,
                         const TargetLowering &TLI, DebugLoc dl) {
  assert(VT.is128BitVector() && "Unknown type for VShift");
  EVT ShVT = MVT::v2i64;
  unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
  SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(Opc, dl, ShVT, SrcOp,
                             DAG.getConstant(NumBits,
                                  TLI.getShiftAmountTy(SrcOp.getValueType()))));
}

SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                          SelectionDAG &DAG) const {

  // Check if the scalar load can be widened into a vector load, and if
  // the address is "base + cst", see if the cst can be "absorbed" into
  // the shuffle mask.
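  // Worked example (hypothetical frame object, illustration only): a 4-byte
  // i32 load from FI+8 feeding a v4i32 splat can be widened to a 16-byte
  // load from FI+0 followed by a <2, 2, 2, 2> splat shuffle; the +8 offset
  // is absorbed as element index (8 - 0) / 4 == 2.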
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
    SDValue Ptr = LD->getBasePtr();
    if (!ISD::isNormalLoad(LD) || LD->isVolatile())
      return SDValue();
    EVT PVT = LD->getValueType(0);
    if (PVT != MVT::i32 && PVT != MVT::f32)
      return SDValue();

    int FI = -1;
    int64_t Offset = 0;
    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
      FI = FINode->getIndex();
      Offset = 0;
    } else if (DAG.isBaseWithConstantOffset(Ptr) &&
               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
      Offset = Ptr.getConstantOperandVal(1);
      Ptr = Ptr.getOperand(0);
    } else {
      return SDValue();
    }

    // FIXME: 256-bit vector instructions don't require a strict alignment,
    // improve this code to support it better.
    unsigned RequiredAlign = VT.getSizeInBits()/8;
    SDValue Chain = LD->getChain();
    // Make sure the stack object alignment is at least 16 or 32.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
      if (MFI->isFixedObjectIndex(FI)) {
        // Can't change the alignment. FIXME: It's possible to compute
        // the exact stack offset and reference FI + adjust offset instead.
        // If someone *really* cares about this, that's the way to implement it.
        return SDValue();
      } else {
        MFI->setObjectAlignment(FI, RequiredAlign);
      }
    }

    // (Offset % 16 or 32) must be multiple of 4. The address is then
    // Ptr + (Offset & ~15).
    if (Offset < 0)
      return SDValue();
    if ((Offset % RequiredAlign) & 3)
      return SDValue();
    int64_t StartOffset = Offset & ~(RequiredAlign-1);
    if (StartOffset)
      Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(),
                        Ptr, DAG.getConstant(StartOffset, Ptr.getValueType()));

    int EltNo = (Offset - StartOffset) >> 2;
    unsigned NumElems = VT.getVectorNumElements();

    EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
    SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
                             LD->getPointerInfo().getWithOffset(StartOffset),
                             false, false, false, 0);

    SmallVector<int, 8> Mask;
    for (unsigned i = 0; i != NumElems; ++i)
      Mask.push_back(EltNo);

    return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
  }

  return SDValue();
}

/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
/// vector of type 'VT', see if the elements can be replaced by a single large
/// load which has the same value as a build_vector whose operands are 'elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
                                        DebugLoc &DL, SelectionDAG &DAG) {
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = Elts.size();

  LoadSDNode *LDBase = NULL;
  unsigned LastLoadedElt = -1U;

  // For each element in the initializer, see if we've found a load or an undef.
  // If we don't find an initial load element, or later load elements are
  // non-consecutive, bail out.
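  // E.g. (illustrative) <load p, load p+4, load p+8, load p+12> for v4i32
  // scans cleanly: LDBase is the load of p and LastLoadedElt ends at 3;
  // <load p, load p+4, undef, undef> stops with LastLoadedElt == 1, which
  // the VZEXT_LOAD path below handles.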
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Elts[i];

    if (!Elt.getNode() ||
        (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
      return SDValue();
    if (!LDBase) {
      if (Elt.getNode()->getOpcode() == ISD::UNDEF)
        return SDValue();
      LDBase = cast<LoadSDNode>(Elt.getNode());
      LastLoadedElt = i;
      continue;
    }
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;

    LoadSDNode *LD = cast<LoadSDNode>(Elt);
    if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
      return SDValue();
    LastLoadedElt = i;
  }

  // If we have found an entire vector of loads and undefs, then return a large
  // load of the entire vector width starting at the base pointer. If we found
  // consecutive loads for the low half, generate a vzext_load node.
  if (LastLoadedElt == NumElems - 1) {
    if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
      return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
                         LDBase->getPointerInfo(),
                         LDBase->isVolatile(), LDBase->isNonTemporal(),
                         LDBase->isInvariant(), 0);
    return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
                       LDBase->getPointerInfo(),
                       LDBase->isVolatile(), LDBase->isNonTemporal(),
                       LDBase->isInvariant(), LDBase->getAlignment());
  }
  if (NumElems == 4 && LastLoadedElt == 1 &&
      DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
    SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
    SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
    SDValue ResNode =
        DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64,
                                LDBase->getPointerInfo(),
                                LDBase->getAlignment(),
                                false/*isVolatile*/, true/*ReadMem*/,
                                false/*WriteMem*/);
    return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
  }
  return SDValue();
}

/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
/// to generate a splat value for the following cases:
/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
/// 2. A splat shuffle which uses a scalar_to_vector node which comes from
/// a scalar load, or a constant.
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
SDValue
X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
  if (!Subtarget->hasAVX())
    return SDValue();

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for broadcast.");

  SDValue Ld;
  bool ConstSplatVal;

  switch (Op.getOpcode()) {
    default:
      // Unknown pattern found.
      return SDValue();

    case ISD::BUILD_VECTOR: {
      // The BUILD_VECTOR node must be a splat.
      if (!isSplatVector(Op.getNode()))
        return SDValue();

      Ld = Op.getOperand(0);
      ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
                       Ld.getOpcode() == ISD::ConstantFP);

      // The suspected load node has several users. Make sure that all
      // of its users are from the BUILD_VECTOR node.
      // Constants may have multiple users.
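      // E.g. (illustrative) a v8f32 build_vector splatting one non-constant
      // load must see exactly 8 uses of that load, all from this
      // BUILD_VECTOR, before it is considered for VBROADCAST.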
      if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0))
        return SDValue();
      break;
    }

    case ISD::VECTOR_SHUFFLE: {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);

      // Shuffles must have a splat mask where the first element is
      // broadcasted.
      if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
        return SDValue();

      SDValue Sc = Op.getOperand(0);
      if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
          Sc.getOpcode() != ISD::BUILD_VECTOR) {

        if (!Subtarget->hasAVX2())
          return SDValue();

        // Use the register form of the broadcast instruction available on AVX2.
        if (VT.is256BitVector())
          Sc = Extract128BitVector(Sc, 0, DAG, dl);
        return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
      }

      Ld = Sc.getOperand(0);
      ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
                       Ld.getOpcode() == ISD::ConstantFP);

      // The scalar_to_vector node and the suspected
      // load node must have exactly one user.
      // Constants may have multiple users.
      if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse()))
        return SDValue();
      break;
    }
  }

  bool Is256 = VT.is256BitVector();

  // Handle broadcasting a single constant scalar from the constant pool
  // into a vector. On Sandybridge it is still better to load a constant vector
  // from the constant pool and not to broadcast it from a scalar.
  if (ConstSplatVal && Subtarget->hasAVX2()) {
    EVT CVT = Ld.getValueType();
    assert(!CVT.isVector() && "Must not broadcast a vector type");
    unsigned ScalarSize = CVT.getSizeInBits();

    if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) {
      const Constant *C = 0;
      if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
        C = CI->getConstantIntValue();
      else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
        C = CF->getConstantFPValue();

      assert(C && "Invalid constant type");

      SDValue CP = DAG.getConstantPool(C, getPointerTy());
      unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
      Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
                       MachinePointerInfo::getConstantPool(),
                       false, false, false, Alignment);

      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
    }
  }

  bool IsLoad = ISD::isNormalLoad(Ld.getNode());
  unsigned ScalarSize = Ld.getValueType().getSizeInBits();

  // Handle AVX2 in-register broadcasts.
  if (!IsLoad && Subtarget->hasAVX2() &&
      (ScalarSize == 32 || (Is256 && ScalarSize == 64)))
    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);

  // The scalar source must be a normal load.
  if (!IsLoad)
    return SDValue();

  if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);

  // The integer check is needed for the 64-bit into 128-bit case, so that it
  // doesn't match f64, since there is no vbroadcastsd for xmm registers.
  if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) {
    if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
  }

  // Unsupported broadcast.
  return SDValue();
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  EVT VT = Op.getValueType();
  EVT ExtVT = VT.getVectorElementType();
  unsigned NumElems = Op.getNumOperands();

  // Vectors containing all zeros can be matched by pxor and xorps later
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
    // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
    if (VT == MVT::v4i32 || VT == MVT::v8i32)
      return Op;

    return getZeroVector(VT, Subtarget, DAG, dl);
  }

  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
  // vpcmpeqd on 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2()))
      return Op;

    return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl);
  }

  SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
  if (Broadcast.getNode())
    return Broadcast;

  unsigned EVTBits = ExtVT.getSizeInBits();

  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDValue, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
    if (Elt.getOpcode() != ISD::Constant &&
        Elt.getOpcode() != ISD::ConstantFP)
      IsAllConstants = false;
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }

  // All undef vector. Return an UNDEF. All zero vectors were handled above.
  if (NumNonZero == 0)
    return DAG.getUNDEF(VT);

  // Special case for single non-zero, non-undef, element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDValue Item = Op.getOperand(Idx);

    // If this is an insertion of an i64 value on x86-32, and if the top bits of
    // the value are obviously zero, truncate the value to i32 and do the
    // insertion that way. Only do this if the value is non-constant or if the
    // value is a constant being inserted into element 0. It is cheaper to do
    // a constant pool load than it is to do a movd + shuffle.
    if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
        (!IsAllConstants || Idx == 0)) {
      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
        // Handle SSE only.
        assert(VT == MVT::v2i64 && "Expected an SSE value type!");
        EVT VecVT = MVT::v4i32;
        unsigned VecElts = 4;

        // Truncate the value (which may itself be a constant) to i32, and
        // convert it to a vector with movd (S2V+shuffle to zero extend).
        Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);

        // Now we have our 32-bit value zero extended in the low element of
        // a vector. If Idx != 0, swizzle it into place.
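        // (Illustrative: with Idx == 1 and VecElts == 4 the loop below
        // builds the mask <1, 1, 2, 3>.)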
        if (Idx != 0) {
          SmallVector<int, 4> Mask;
          Mask.push_back(Idx);
          for (unsigned i = 1; i != VecElts; ++i)
            Mask.push_back(i);
          Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
                                      &Mask[0]);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements. This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
          (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
        if (VT.is256BitVector()) {
          SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
          return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
                             Item, DAG.getIntPtrConstant(0));
        }
        assert(VT.is128BitVector() && "Expected an SSE value type!");
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }

      if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
        if (VT.is256BitVector()) {
          SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
          Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
        } else {
          assert(VT.is128BitVector() && "Expected an SSE value type!");
          Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // Is it a vector logical left shift?
    if (NumElems == 2 && Idx == 1 &&
        X86::isZeroNode(Op.getOperand(0)) &&
        !X86::isZeroNode(Op.getOperand(1))) {
      unsigned NumBits = VT.getSizeInBits();
      return getVShift(true, VT,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
      SmallVector<int, 8> MaskVec;
      for (unsigned i = 0; i != NumElems; ++i)
        MaskVec.push_back(i == Idx ? 0 : 1);
      return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1) {
    if (EVTBits == 32) {
      // Instead of a shuffle like this:
      // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
      // Check if it's possible to issue this instead.
      // shuffle (vload ptr), undef, <1, 1, 1, 1>
      unsigned Idx = CountTrailingZeros_32(NonZeros);
      SDValue Item = Op.getOperand(Idx);
      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();

  // For AVX-length vectors, build the individual 128-bit pieces and use
  // shuffles to put them in place.
  if (VT.is256BitVector()) {
    SmallVector<SDValue, 32> V;
    for (unsigned i = 0; i != NumElems; ++i)
      V.push_back(Op.getOperand(i));

    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);

    // Build both the lower and upper subvector.
    SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2);
    SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2],
                                NumElems/2);

    // Recreate the wider vector with the lower and upper part.
    return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64) {
    if (NumNonZero == 1) {
      // One half is zero or undef.
      unsigned Idx = CountTrailingZeros_32(NonZeros);
      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
                               Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDValue, 8> V(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
      default: break;
      case 0:
        V[i] = V[i*2];  // Must be a zero vector.
        break;
      case 1:
        V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
        break;
      case 2:
        V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      case 3:
        V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      }
    }

    bool Reverse1 = (NonZeros & 0x3) == 2;
    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
    };
    return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
  }

  if (Values.size() > 1 && VT.is128BitVector()) {
    // Check for a build vector of consecutive loads.
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = Op.getOperand(i);

    // Check for elements which are consecutive loads.
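    // (E.g., illustratively, a v4f32 build_vector of loads from p, p+4,
    // p+8 and p+12 collapses to one 16-byte load of p here.)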
    SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
    if (LD.getNode())
      return LD;

    // For SSE 4.1, use insertps to put the high elements into the low element.
    if (getSubtarget()->hasSSE41()) {
      SDValue Result;
      if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
        Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
      else
        Result = DAG.getUNDEF(VT);

      for (unsigned i = 1; i < NumElems; ++i) {
        if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
        Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                             Op.getOperand(i), DAG.getIntPtrConstant(i));
      }
      return Result;
    }

    // Otherwise, expand into a number of unpckl*, start by extending each of
    // our (non-undef) elements to the full vector width with the element in the
    // bottom slot of the vector (which generates no code for SSE).
    for (unsigned i = 0; i < NumElems; ++i) {
      if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
      else
        V[i] = DAG.getUNDEF(VT);
    }

    // Next, we iteratively mix elements, e.g. for v4f32:
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    unsigned EltStride = NumElems >> 1;
    while (EltStride != 0) {
      for (unsigned i = 0; i < EltStride; ++i) {
        // If V[i+EltStride] is undef and this is the first round of mixing,
        // then it is safe to just drop this shuffle: V[i] is already in the
        // right place, the one element (since it's the first round) being
        // inserted as undef can be dropped. This isn't safe for successive
        // rounds because they will permute elements within both vectors.
        if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
            EltStride == NumElems/2)
          continue;

        V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
      }
      EltStride >>= 1;
    }
    return V[0];
  }
  return SDValue();
}

// LowerMMXCONCAT_VECTORS - We support concatenating two MMX registers and
// placing them in an MMX register. This is better than doing a stack convert.
static SDValue LowerMMXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  EVT ResVT = Op.getValueType();

  assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
         ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
  int Mask[2];
  SDValue InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Op.getOperand(0));
  SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
  InVec = Op.getOperand(1);
  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    unsigned NumElts = ResVT.getVectorNumElements();
    VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
    VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
                        InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
  } else {
    InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
    SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
    Mask[0] = 0; Mask[1] = 2;
    VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
  }
  return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
}

// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
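// For example, concatenating two v4f32 operands into a v8f32 puts the first
// operand in elements 0-3 and the second in elements 4-7, with the high
// insert matching vinsertf128 (illustrative description of the code below).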
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  EVT ResVT = Op.getValueType();

  assert(ResVT.is256BitVector() && "Value type must be 256-bit wide");

  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = ResVT.getVectorNumElements();

  return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}

SDValue
X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  EVT ResVT = Op.getValueType();

  assert(Op.getNumOperands() == 2);
  assert((ResVT.is128BitVector() || ResVT.is256BitVector()) &&
         "Unsupported CONCAT_VECTORS for value type");

  // We support concatenating two MMX registers and placing them in an MMX
  // register. This is better than doing a stack convert.
  if (ResVT.is128BitVector())
    return LowerMMXCONCAT_VECTORS(Op, DAG);

  // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.
  return LowerAVXCONCAT_VECTORS(Op, DAG);
}

// Try to lower a shuffle node into a simple blend instruction.
static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
                                          const X86Subtarget *Subtarget,
                                          SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  MVT VT = SVOp->getValueType(0).getSimpleVT();
  unsigned NumElems = VT.getVectorNumElements();

  if (!Subtarget->hasSSE41())
    return SDValue();

  unsigned ISDNo = 0;
  MVT OpTy;

  switch (VT.SimpleTy) {
  default: return SDValue();
  case MVT::v8i16:
    ISDNo = X86ISD::BLENDPW;
    OpTy = MVT::v8i16;
    break;
  case MVT::v4i32:
  case MVT::v4f32:
    ISDNo = X86ISD::BLENDPS;
    OpTy = MVT::v4f32;
    break;
  case MVT::v2i64:
  case MVT::v2f64:
    ISDNo = X86ISD::BLENDPD;
    OpTy = MVT::v2f64;
    break;
  case MVT::v8i32:
  case MVT::v8f32:
    if (!Subtarget->hasAVX())
      return SDValue();
    ISDNo = X86ISD::BLENDPS;
    OpTy = MVT::v8f32;
    break;
  case MVT::v4i64:
  case MVT::v4f64:
    if (!Subtarget->hasAVX())
      return SDValue();
    ISDNo = X86ISD::BLENDPD;
    OpTy = MVT::v4f64;
    break;
  }
  assert(ISDNo && "Invalid Op Number");

  unsigned MaskVals = 0;

  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if (EltIdx == (int)i || EltIdx < 0)
      MaskVals |= (1<<i);
    else if (EltIdx == (int)(i + NumElems))
      continue; // Bit is set to zero.
    else
      return SDValue();
  }

  V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2);
  SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2,
                            DAG.getConstant(MaskVals, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}

// v8i16 shuffles - Prefer shuffles in the following order:
//  1. [all]   pshuflw, pshufhw, optional move
//  2. [ssse3] 1 x pshufb
//  3. [ssse3] 2 x pshufb + 1 x por
//  4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
                                            SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs. Undef
  // mask values count as coming from any quadword, for better codegen.
  unsigned LoQuad[] = { 0, 0, 0, 0 };
  unsigned HiQuad[] = { 0, 0, 0, 0 };
  std::bitset<4> InputQuads;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned *Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }

  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, if all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }

  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    int MaskV[] = {
      BestLoQuad < 0 ? 0 : BestLoQuad,
      BestHiQuad < 0 ? 1 : BestHiQuad
    };
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
                             getShufflePSHUFLWImmediate(SVOp);
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    for (unsigned i = 0; i != 8; ++i) {
      int EltIdx = MaskVals[i] * 2;
      int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx;
      int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1;
      pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
    }
    V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 8; ++i) {
      int EltIdx = MaskVals[i] * 2;
      int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15;
      pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
    }
    V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }

  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
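  // (Illustrative: with BestLoQuad == 1 and MaskVals beginning <5, 4, 7, 6>,
  // the loop below builds the v8i16 mask <1, 0, 3, 2, 4, 5, 6, 7> and marks
  // elements 0-3 as InOrder.)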
  std::bitset<8> InOrder;
  if (BestLoQuad >= 0) {
    int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV[i] = idx & 3;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFLWImmediate(SVOp), DAG);
    }
  }

  // If BestHi >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV[i] = (idx & 3) + 4;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFHWImmediate(SVOp), DAG);
    }
  }

  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8) ?
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx)) :
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

// v16i8 shuffles - Prefer shuffles in the following order:
//  1. [ssse3] 1 x pshufb
//  2. [ssse3] 2 x pshufb + 1 x por
//  3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static
SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG,
                                 const X86TargetLowering &TLI) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  ArrayRef<int> MaskVals = SVOp->getMask();

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
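  // (Illustrative pshufb sketch: a v16i8 mask like <0, 17, 2, 19, ...> that
  // draws on both inputs becomes two PSHUFBs whose control bytes are 0x80
  // wherever the element belongs to the other input, OR'd together.)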
  if (TLI.getSubtarget()->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (V2IsUndef)
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }

  // No SSSE3 - Calculate in place words and then fix all out of place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
    // together using a single extract, extract the word and insert it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits;
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                  TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG, DebugLoc dl) {
  MVT VT = SVOp->getValueType(0).getSimpleVT();
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(EVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(EVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                     .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}
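
// Illustrative note (not from the original source): X86ISD::VZEXT_MOVL keeps
// element 0 and zeroes the remaining elements, matching instructions like
// movq (move low quadword, zero the upper half) or movd. E.g.
// (v2i64 (vzext_movl X)) yields <X[0], 0>, which is why it is preferred here
// over movss/movsd, whose upper bits are inherited from the destination
// rather than cleared.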

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {

  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  EVT VT = SVOp->getValueType(0);

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  DebugLoc dl = SVOp->getDebugLoc();
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0],
                              SVOps.size());
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}
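
// Illustrative walkthrough (not from the original source): for a v8i32
// shuffle with mask <0,1,8,9, 4,5,12,13>, the low output lane reads 128-bit
// halves 0 (low half of V1) and 2 (low half of V2), giving
// InputUsed = {0, 2} and the per-lane mask <0,1,4,5>; the high output lane
// reads halves 1 and 3 the same way. Each half then becomes a single 128-bit
// shuffle, and the two halves are glued back together with CONCAT_VECTORS.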

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  EVT VT = SVOp->getValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter). Then, use a shufps to build the final vector, taking the
    // half containing the element from Y from the intermediate, and the other
    // half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}
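
// Worked example (illustrative): mask <0,4,1,5> has NumLo == NumHi == 2, so
// the two-shuffle path fires. The gather shuffle uses Mask1 = <0,1,4,5>,
// producing T = <V1[0],V1[1],V2[0],V2[1]>; the reorder shuffle of T with
// itself uses Mask2 = <0,2,5,7>, yielding <V1[0],V2[0],V1[1],V2[1]>, the
// same result a single unpcklps would give.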

static bool MayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);
  if (MayFoldLoad(V))
    return true;
  return false;
}

// FIXME: the version above should always be used. Since there's
// a bug where several vector shuffles can't be folded because the
// DAG is not updated during lowering and a node claims to have two
// uses while it only has one, use this version, and let isel match
// another instruction if the load really happens to have more than
// one use. Remove this version after this bug gets fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (ISD::isNormalLoad(V.getNode()))
    return true;
  return false;
}

static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}
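
// Illustrative semantics (not from the original source): movlhps copies the
// low 64 bits of the second operand into the high 64 bits of the first, so
// for v4f32 it implements the shuffle mask <0,1,4,5>. Its mirror, movhlps
// (see getMOVHighToLow below), copies the high 64 bits of the second operand
// into the low half, i.e. mask <6,7,2,3>.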

static
SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  // turns into:
  // (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD.
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If we don't care about the second element, proceed to use movss.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp
  // load folding logic (see the code above the getMOVLP call). Match it here
  // then; this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // so as to remove this logic from here, as much as possible.
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

SDValue
X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations
  if (SVOp->isSplat()) {
    unsigned NumElem = VT.getVectorNumElements();
    int Size = VT.getSizeInBits();

    // Use vbroadcast whenever the splat comes from a foldable load
    SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
    if (Broadcast.getNode())
      return Broadcast;

    // Handle splats by matching through known shuffle masks
    if ((Size == 128 && NumElem <= 4) ||
        (Size == 256 && NumElem < 8))
      return SDValue();

    // All remaining splats are promoted to target supported vector shuffles.
    return PromoteSplat(SVOp, DAG);
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8 ||
      VT == MVT::v16i16 || VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if ((VT == MVT::v4i32 ||
             (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        EVT NewVT = NewOp.getValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
                              DAG, Subtarget, dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        EVT NewVT = NewOp.getValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
                              DAG, Subtarget, dl);
      }
    }
  }
  return SDValue();
}
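
// Illustrative note: the movq trick above covers shuffles of a vector with
// an all-zero vector where only the low 64 bits of the non-zero input
// survive. After narrowing v4i32/v4f32 to v2i64, such a mask looks like a
// "move low, zero the rest" pattern, and getVZextMovL emits it as a single
// movq instead of a shufps/pshufd sequence.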

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned NumElems = VT.getVectorNumElements();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX2 = Subtarget->hasAVX2();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  assert(!V1IsUndef && "Op 1 of shuffle should not be undef");

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    shuffle nodes.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel; all of them must be converted to a target
  // specific node here.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, DAG);
  if (NewOp.getNode())
    return NewOp;

  SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());

  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
  if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
      V2IsUndef && RelaxedMayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (isMOVHLPS_v_undef_Mask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  // Used to match splats
  if (HasSSE2 && isUNPCKHMask(M, VT, HasAVX2) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isPSHUFDMask(M, VT)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only
    // pshufd as its name says. Sad but true; emulate the behavior for now...
    if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = getShuffleSHUFImmediate(SVOp);

    if (HasAVX && (VT == MVT::v4f32 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask,
                                  DAG);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                                TargetMask, DAG);
  }

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (isMOVLMask(M, VT)) {
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMOVLPMask(M, VT)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasAVX2))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (isMOVHLPSMask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (isMOVLPMask(M, VT))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(M, VT) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshldq / vsrldq.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = true;
  }

  if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
    // Shuffling low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (isUNPCKLMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

  if (isUNPCKHMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SmallVector<int, 8> NewMask(M.begin(), M.end());
    NormalizeMask(NewMask, NumElems);
    if (isUNPCKLMask(NewMask, VT, HasAVX2, true))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
    if (isUNPCKHMask(NewMask, VT, HasAVX2, true))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = false;

    if (isUNPCKLMask(M, VT, HasAVX2))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

    if (isUNPCKHMask(M, VT, HasAVX2))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true)))
    return CommuteVectorShuffle(SVOp, DAG);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target specific
  // nodes, and remove one by one until they don't return Op anymore.

  if (isPALIGNRMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
                                getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64 || VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  if (isSHUFPMask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
                                getShuffleSHUFImmediate(SVOp), DAG);

  if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  //===--------------------------------------------------------------------===//
  // Generate target specific nodes for 128 or 256-bit shuffles only
  // supported in the AVX instruction set.
  //

  // Handle VMOVDDUPY permutations
  if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);

  // Handle VPERMILPS/D* permutations
  if (isVPERMILPMask(M, VT, HasAVX)) {
    if (HasAVX2 && VT == MVT::v8i32)
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
                                  getShuffleSHUFImmediate(SVOp), DAG);
    return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1,
                                getShuffleSHUFImmediate(SVOp), DAG);
  }

  // Handle VPERM2F128/VPERM2I128 permutations
  if (isVPERM2X128Mask(M, VT, HasAVX))
    return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
                                V2, getShuffleVPERM2X128Immediate(SVOp), DAG);

  SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG);
  if (BlendOp.getNode())
    return BlendOp;

  if (V2IsUndef && HasAVX2 && (VT == MVT::v8i32 || VT == MVT::v8f32)) {
    SmallVector<SDValue, 8> permclMask;
    for (unsigned i = 0; i != 8; ++i) {
      permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MVT::i32));
    }
    SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32,
                               &permclMask[0], 8);
    // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
    return DAG.getNode(X86ISD::VPERMV, dl, VT,
                       DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
  }

  if (V2IsUndef && HasAVX2 && (VT == MVT::v4i64 || VT == MVT::v4f64))
    return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1,
                                getShuffleCLImmediate(SVOp), DAG);


  //===--------------------------------------------------------------------===//
  // Since no target specific shuffle was selected for this generic one,
  // lower it into other known shuffles. FIXME: this isn't true yet, but
  // this is the plan.
  //

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 128-bit wide vectors with 4 elements, and match them with
  // several different shuffle types.
  if (NumElems == 4 && VT.is128BitVector())
    return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);

  // Handle general 256-bit shuffles
  if (VT.is256BitVector())
    return LowerVECTOR_SHUFFLE_256(SVOp, DAG);

  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  if (!Op.getOperand(0).getValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to an FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32. And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // ExtractPS/pextrq work with a constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}


SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  EVT VecVT = Vec.getValueType();

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.is256BitVector()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    unsigned NumElems = VecVT.getVectorNumElements();
    SDValue Idx = Op.getOperand(1);
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Get the 128-bit vector.
    Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);

    if (IdxVal >= NumElems/2)
      IdxVal -= NumElems/2;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getConstant(IdxVal, MVT::i32));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    EVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDValue();
}
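
// Illustrative lowering (not from the original source) of extracting
// element 2 from a v4f32 without SSE4.1: shuffle the wanted element into
// slot 0 and then read the low element, roughly
//   shufps $2, %xmm0, %xmm0   ; element 2 -> slot 0 (other slots don't care)
//   movss  %xmm0, (mem)       ; or movd to a GPR for integer types
// With SSE4.1 the same extract is a single extractps/pextrd.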

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();

  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (!VT.is128BitVector())
    return SDValue();

  if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
      isa<ConstantSDNode>(N2)) {
    unsigned Opc;
    if (VT == MVT::v8i16)
      Opc = X86ISD::PINSRW;
    else if (VT == MVT::v16i8)
      Opc = X86ISD::PINSRB;
    else
      Opc = X86ISD::PINSRB;

    // Transform it so it matches pinsr{b,w} which expects a GR32 as its
    // second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }

  if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
    // Bits [7:6] of the constant are the source select. This will always be
    // zero here. The DAG Combiner may combine an extract_elt index into these
    // bits. For example (insert (extract, 3), 2) could be matched by putting
    // the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    // value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    // combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    // Create this as a scalar to vector.
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  }

  if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) {
    // PINSR* works with a constant index.
    return Op;
  }
  return SDValue();
}
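
// Worked example of the INSERTPS immediate (illustrative): inserting a
// scalar into element 2 shifts the index left by 4, giving 0x20, i.e.
// source select (bits [7:6]) = 0, destination select (bits [5:4]) = 2,
// zero mask (bits [3:0]) = 0. In assembly this is roughly:
//   insertps $0x20, %xmm1, %xmm0   ; xmm0[2] = xmm1[0]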

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  // If this is a 256-bit vector result, first extract the 128-bit vector,
  // insert the element into the extracted half and then place it back.
  if (VT.is256BitVector()) {
    if (!isa<ConstantSDNode>(N2))
      return SDValue();

    // Get the desired 128-bit vector half.
    unsigned NumElems = VT.getVectorNumElements();
    unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
    SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired half.
    bool Upper = IdxVal >= NumElems/2;
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal,
                                    MVT::i32));

    // Insert the changed part back to the 256-bit vector
    return Insert128BitVector(N0, V, IdxVal, DAG, dl);
  }

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EltVT == MVT::i8)
    return SDValue();

  if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it matches pinsrw, which expects a 16-bit value in a
    // GR32 as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT OpVT = Op.getValueType();

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    EVT VT128 = EVT::getVectorVT(*Context,
                                 OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / 2);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }

  if (OpVT == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(OpVT.is128BitVector() && "Expected an SSE type!");
  return DAG.getNode(ISD::BITCAST, dl, OpVT,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}

// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
SDValue
X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue Idx = Op.getNode()->getOperand(1);

    if (Op.getNode()->getValueType(0).is128BitVector() &&
        Vec.getNode()->getValueType(0).is256BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Extract128BitVector(Vec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
SDValue
X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue SubVec = Op.getNode()->getOperand(1);
    SDValue Idx = Op.getNode()->getOperand(2);

    if (Op.getNode()->getValueType(0).is256BitVector() &&
        SubVec.getNode()->getValueType(0).is128BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);

  return Result;
}
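
// Illustrative example (not from the original source): with 32-bit ELF PIC
// (GOT style), "$g + Offset" means the PIC base held in X86ISD::GlobalBaseReg
// plus a sym@GOTOFF displacement, so a constant-pool reference ends up as
// something like
//   leal .LCPI0_0@GOTOFF(%ebx), %eax
// whereas x86-64 small-code-model PIC uses a RIP-relative WrapperRIP instead,
// e.g. movsd .LCPI0_0(%rip), %xmm0.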

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  DebugLoc DL = Op.getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);


  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = getTargetMachine().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
                                       /*isTarget=*/true, OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                      int64_t Offset,
                                      SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
  CodeModel::Model M = getTargetMachine().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  DebugLoc dl = GA->getDebugLoc();
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2);
  }

  // TLSADDR will be codegen'ed as a call. Inform MFI that this function has
  // calls.
  MFI->setAdjustsStack(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               DebugLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
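
// Illustrative expansion (not from the original source): on 32-bit ELF the
// general dynamic sequence above loads the GOT base into %ebx, and the
// TLSADDR pseudo becomes a __tls_get_addr call whose result lands in %eax,
// roughly:
//   leal x@tlsgd(,%ebx,1), %eax
//   call ___tls_get_addr@PLT        ; %eax = &x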

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
                                   .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr),
                                      false, false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // initial exec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx), %eax" (initial exec, 32-bit pic)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(),
                                       PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, false,
                         0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  if (Subtarget->isTargetELF()) {
    TLSModel::Model model = getTargetMachine().getTLSModel(GV);

    switch (model) {
      case TLSModel::GeneralDynamic:
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
        return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
      case TLSModel::LocalDynamic:
        return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
                                           Subtarget->is64Bit());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                                   Subtarget->is64Bit(),
                        getTargetMachine().getRelocationModel() == Reloc::PIC_);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS.  Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    DebugLoc DL = Op.getDebugLoc();
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       DebugLoc(), getPointerTy()),
                           Offset);

    // Lowering the machine isd will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);

    // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
                              Chain.getValue(1));
  }

  if (Subtarget->isTargetWindows()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                               ; from TEB
    //   mov ecx, dword [rel _tls_index]; Load index (from C runtime)
    //   mov rcx, qword [rdx+rcx*8]
    //   mov eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64bit: gs:0x58
    // Windows 32bit: fs:__tls_array

    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);
    DebugLoc dl = GA->getDebugLoc();
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit).
    Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                       : Type::getInt32PtrTy(*DAG.getContext(),
                                                             257));

    SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain,
                                        Subtarget->is64Bit()
                                        ? DAG.getIntPtrConstant(0x58)
                                        : DAG.getExternalSymbol("_tls_array",
                                                                getPointerTy()),
                                        MachinePointerInfo(Ptr),
                                        false, false, false, 0);

    // Load the _tls_index variable
    SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
    if (Subtarget->is64Bit())
      IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
                           IDX, MachinePointerInfo(), MVT::i32,
                           false, false, 0);
    else
      IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
                        false, false, false, 0);

    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                    getPointerTy());
    IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);

    SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
    res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
                      false, false, false, 0);

    // Get the offset of start of .tls section
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}
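// For reference, the Windows implicit-TLS computation built above, as C-like
// pseudocode (the TEB field names are the conventional ones, not identifiers
// referenced by this code):
//
//   char **tls_array = TEB->ThreadLocalStoragePointer; // gs:0x58 / fs:__tls_array
//   char  *tls_base  = tls_array[_tls_index];          // this module's block
//   result           = tls_base + var@SECREL;          // offset into .tls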
/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, MVT::i8))
                       : DAG.getConstant(0, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
  }

  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, MVT::i8));
  SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                             AndNode, DAG.getConstant(0, MVT::i8));

  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
  SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };

  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  }

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
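// Illustration for SHL_PARTS with VTBits == 32 (an i64 shift on 32-bit x86):
// when amt < 32 the result is { Hi = shld(hi, lo, amt), Lo = lo << amt }.
// Hardware shifts mask the count to 5 bits, so for amt >= 32 shld/shl
// effectively see amt-32; the CMP of (amt & 32) plus the two CMOVs above
// then swap in { Hi = lo << (amt-32), Lo = 0 }, the correct wide result.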
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  EVT SrcVT = Op.getOperand(0).getValueType();

  if (SrcVT.isVector())
    return SDValue();

  assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  DebugLoc dl = Op.getDebugLoc();
  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               MachinePointerInfo::getFixedStack(SSFI),
                               false, false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  // Build the FILD
  DebugLoc DL = Op.getDebugLoc();
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits()/8;

  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO =
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOLoad, ByteSize, ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
  SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                                    X86ISD::FILD, DL,
                                           Tys, Ops, array_lengthof(Ops),
                                           SrcVT, MMO);

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
    SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
    MachineMemOperand *MMO =
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, SSFISize, SSFISize);

    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
                                    Ops, array_lengthof(Ops),
                                    Op.getValueType(), MMO);
    Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, false, 0);
  }

  return Result;
}
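// In x87 terms, the useSSE path of BuildFILD amounts to something like this
// (illustrative, for sitofp i64 -> f64 on 32-bit x86):
//
//   fildll (%esp)          // load the stored integer onto the FP stack
//   fstpl  8(%esp)         // store it back out as an f64
//   movsd  8(%esp), %xmm0  // reload into an SSE register
//
// The extra store+load pair is the FST/getLoad above; see the FIXME about
// RFP liveness for why the FILD and FST stay glued together.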
// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
                                               SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq      %rax, %xmm0
     punpckldq (c0), %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd     (c1), %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd %xmm0, %xmm0
     #else
       pshufd $0x4e, %xmm0, %xmm1
       addpd  %xmm1, %xmm0
     #endif
  */

  DebugLoc dl = Op.getDebugLoc();
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
                              CLod0);

  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget->hasSSE3()) {
    // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
                         Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0));
}
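// Why the magic constants work: as raw bits the u64 is a dword pair
// (lo, hi).  Interleaving it with 0x43300000/0x45300000 builds two doubles,
// 2^52 + lo and 2^84 + hi*2^32, both exact since lo and hi land in the
// 52-bit mantissas.  Subtracting c1 = { 2^52, 2^84 } leaves exactly
// (double)lo and (double)(hi*2^32), and the horizontal add yields
// lo + hi*2^32 == the original value, with only the final add rounding.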
// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Load)),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Bias)));
  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
                   DAG.getIntPtrConstant(0));

  // Subtract the bias.
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64))
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  return Sub;
}
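// Scalar version of the same bit trick: OR-ing the zero-extended u32 into
// the mantissa of the bias 2^52 produces the exact double 2^52 + x, so the
// FSUB of the bias recovers (double)x exactly; only the optional
// FP_ROUND/FP_EXTEND to the destination type can round.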
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  EVT SrcVT = N0.getValueType();
  EVT DstVT = Op.getValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative.  This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                          MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
                                         getPointerTy());

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0);
  SDValue Four = DAG.getIntPtrConstant(4);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
                                 FudgePtr, MachinePointerInfo::getConstantPool(),
                                 MVT::f32, false, false, 4);
  // Extend everything to 80 bits to force it to be done on x87.
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}
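// Note on the fudge factor: 0x5F800000 is 2^64 as an IEEE-754 single.  FILD
// reads the stored u64 as *signed*, so inputs with the top bit set arrive as
// (value - 2^64); the conditional FADD of 2^64, done in f80 so it is exact,
// corrects this before the single rounding down to DstVT.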
std::pair<SDValue,SDValue> X86TargetLowering::
FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const {
  DebugLoc DL = Op.getDebugLoc();

  EVT DstTy = Op.getValueType();

  if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget->is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->int64 either into FISTP64 followed by a load from a temporary
  // stack slot, or into the FTOL runtime function.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getSizeInBits()/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  if (!IsSigned && isIntegerTypeFTOL(DstTy))
    Opc = X86ISD::WIN_FTOL;
  else
    switch (DstTy.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    }

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  EVT TheVT = Op.getOperand(0).getValueType();
  // FIXME: This causes a redundant load/store if the SSE-class value is
  // already in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, 0);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };

    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                              MachineMemOperand::MOLoad, MemSize, MemSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3,
                                    DstTy, MMO);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, MemSize, MemSize);

  if (Opc != X86ISD::WIN_FTOL) {
    // Build the FP_TO_INT*_IN_MEM
    SDValue Ops[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           Ops, 3, DstTy, MMO);
    return std::make_pair(FIST, StackSlot);
  } else {
    SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
                               DAG.getVTList(MVT::Other, MVT::Glue),
                               Chain, Value);
    SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
                                     MVT::i32, ftol.getValue(1));
    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
                                     MVT::i32, eax.getValue(2));
    SDValue Ops[] = { eax, edx };
    SDValue pair = IsReplace
      ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2)
      : DAG.getMergeValues(Ops, 2, DL);
    return std::make_pair(pair, SDValue());
  }
}

SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return SDValue();

  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
    /*IsSigned=*/ true, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (FIST.getNode() == 0) return Op;

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}
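// Why FP_TO_INT*_IN_MEM exists: x87's fistp converts using the current
// rounding mode, while fptosi must truncate.  The pseudo is therefore
// expanded after isel into, roughly:
//
//   fnstcw <slot>           // save the FP control word
//   orw    $0xC00, <slot>   // rounding control = round toward zero
//   fldcw  <slot>           // install it
//   fistp  <dst>            // convert with truncation
//   fldcw  <saved>          // restore the caller's control word
//
// SSE targets sidestep this entirely (cvttss2si/cvttsd2si truncate
// directly), which is why those cases are reported Legal above.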
SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
    /*IsSigned=*/ false, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  assert(FIST.getNode() && "Unexpected failure");

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}

SDValue X86TargetLowering::LowerFABS(SDValue Op,
                                     SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT EltVT = VT;
  if (VT.isVector())
    EltVT = VT.getVectorElementType();
  Constant *C;
  if (EltVT == MVT::f64) {
    C = ConstantVector::getSplat(2,
        ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
  } else {
    C = ConstantVector::getSplat(4,
        ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
  }
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                             MachinePointerInfo::getConstantPool(),
                             false, false, false, 16);
  return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}

SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT EltVT = VT;
  unsigned NumElts = VT == MVT::f64 ? 2 : 4;
  if (VT.isVector()) {
    EltVT = VT.getVectorElementType();
    NumElts = VT.getVectorNumElements();
  }
  Constant *C;
  if (EltVT == MVT::f64)
    C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
  else
    C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
  C = ConstantVector::getSplat(NumElts, C);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                             MachinePointerInfo::getConstantPool(),
                             false, false, false, 16);
  if (VT.isVector()) {
    MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
    return DAG.getNode(ISD::BITCAST, dl, VT,
                       DAG.getNode(ISD::XOR, dl, XORVT,
                                   DAG.getNode(ISD::BITCAST, dl, XORVT,
                                               Op.getOperand(0)),
                                  DAG.getNode(ISD::BITCAST, dl, XORVT, Mask)));
  }

  return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
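// Both lowerings are the usual IEEE bit tricks -- fabs clears the sign bit,
// fneg flips it -- done as FP-domain logic ops against constant-pool masks,
// e.g. for scalar f32 (labels illustrative):
//
//   andps .LCPI_absmask(%rip), %xmm0   // mask = splat(~(1 << 31))
//   xorps .LCPI_negmask(%rip), %xmm0   // mask = splat(1 << 31)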
SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT SrcVT = Op1.getValueType();

  // If second operand is smaller, extend it first.
  if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  // First get the sign bit of second operand.
  SmallVector<Constant*,4> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (SrcVT.bitsGT(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear first operand sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}

SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
  SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
                                  DAG.getConstant(1, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}
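// So copysign(x, y) is lowered as the standard composition
//   (x & ~signmask) | (y & signmask)
// using FAND/FOR (andps/orps), with an FSRL thrown in to slide the f64 sign
// bit into f32 position when the operand types differ.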
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
                                    SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO:
    NeedOF = true;
    break;
  }

  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  unsigned Opcode = 0;
  unsigned NumOperands = 0;
  switch (Op.getNode()->getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output.  Alas, even if none of our users are stores, that
    // doesn't prove we're O.K.  Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC.  A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::AND: {
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    bool NonFlagUse = false;
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;
      unsigned UOpNo = UI.getOperandNo();
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
        // Look past the truncate.
        UOpNo = User->use_begin().getOperandNo();
        User = *User->use_begin();
      }

      if (User->getOpcode() != ISD::BRCOND &&
          User->getOpcode() != ISD::SETCC &&
          (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
        NonFlagUse = true;
        break;
      }
    }

    if (!NonFlagUse)
      break;
  }
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (Op.getNode()->getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  if (Opcode == X86ISD::CMP) {
    SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0),
                              Op.getOperand(1));
    // We can't replace usage of SUB with CMP.
    // The SUB node will be removed later because there is no use of it.
    return SDValue(New.getNode(), 0);
  }

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0; i != NumOperands; ++i)
    Ops.push_back(Op.getOperand(i));

  SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
  DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}
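// The payoff of reusing result 1 of an arithmetic node: "x = a + b;
// if (x == 0)" can select to
//
//   addl %esi, %edi
//   je   .LBB0_1          // ZF comes from the add itself
//
// rather than the add followed by a separate "testl %edi, %edi".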
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                                   SelectionDAG &DAG) const {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
    if (C->getAPIntValue() == 0)
      return EmitTest(Op0, X86CC, DAG);

  DebugLoc dl = Op0.getDebugLoc();
  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
    SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
                              Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}

/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
                                                 SelectionDAG &DAG) const {
  // If the subtarget does not support the FUCOMI instruction, floating-point
  // comparisons have to be converted.
  if (Subtarget->hasCMov() ||
      Cmp.getOpcode() != X86ISD::CMP ||
      !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;

  // The instruction selector will select an FUCOM instruction instead of
  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
  DebugLoc dl = Cmp.getDebugLoc();
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
                            DAG.getConstant(8, MVT::i8));
  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
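// The FPSW-to-EFLAGS dance above corresponds to the classic pre-CMOV x87
// sequence (roughly):
//
//   fucom  %st(1)     // compare; condition bits land in FPSW
//   fnstsw %ax        // FNSTSW16r: copy FPSW into AX
//   sahf              // load AH into EFLAGS (the SRL by 8 models AH)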
/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
/// if it's possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
                                     DebugLoc dl, SelectionDAG &DAG) const {
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue LHS, RHS;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
      if (And00C->getZExtValue() == 1) {
        // If we looked past a truncate, check that it's only truncating away
        // known zeros.
        unsigned BitWidth = Op0.getValueSizeInBits();
        unsigned AndBitWidth = And.getValueSizeInBits();
        if (BitWidth > AndBitWidth) {
          APInt Zeros, Ones;
          DAG.ComputeMaskedBits(Op0, Zeros, Ones);
          if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
            return SDValue();
        }
        LHS = Op1;
        RHS = Op0.getOperand(1);
      }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }

    // Use BT if the immediate can't be encoded in a TEST instruction.
    if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
      LHS = AndLHS;
      RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
    }
  }

  if (LHS.getNode()) {
    // If LHS is i8, promote it to i32 with any_extend.  There is no i8 BT
    // instruction.  Since the shift amount is in-range-or-undefined, we know
    // that doing a bittest on the i32 value is ok.  We extend to i32 because
    // the encoding for the i16 version is larger than the i32 version.
    // Also promote i16 to i32 for performance / code size reason.
    if (LHS.getValueType() == MVT::i8 ||
        LHS.getValueType() == MVT::i16)
      LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);

    // If the operand types disagree, extend the shift amount to match.  Since
    // BT ignores high bits (like shifts) we can use anyextend.
    if (LHS.getValueType() != RHS.getValueType())
      RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);

    SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
    unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, MVT::i8), BT);
  }

  return SDValue();
}
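// BT semantics, for reference: "bt reg, n" copies bit n into CF and nothing
// else.  Hence (X & (1 << N)) == 0 becomes BT + SETAE (CF clear) and != 0
// becomes BT + SETB (CF set), which is the COND_AE/COND_B choice above.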
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG);

  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
      Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op1)->isNullValue() &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode())
      return NewSetCC;
  }

  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
  // these.
  if (Op1.getOpcode() == ISD::Constant &&
      (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
       cast<ConstantSDNode>(Op1)->isNullValue()) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^
                    cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert) return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                         DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
    }
  }

  bool isFP = Op1.getValueType().isFloatingPoint();
  unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(X86CC, MVT::i8), EFLAGS);
}
// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC = Op.getOperand(2);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond;
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
  DebugLoc dl = Op.getDebugLoc();

  if (isFP) {
    unsigned SSECC = 8;
    EVT EltVT = Op0.getValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64); (void)EltVT;

    bool Swap = false;

    // SSE Condition code mapping:
    //  0 - EQ
    //  1 - LT
    //  2 - LE
    //  3 - UNORD
    //  4 - NEQ
    //  5 - NLT
    //  6 - NLE
    //  7 - ORD
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETOEQ:
    case ISD::SETEQ:  SSECC = 0; break;
    case ISD::SETOGT:
    case ISD::SETGT: Swap = true; // Fallthrough
    case ISD::SETLT:
    case ISD::SETOLT: SSECC = 1; break;
    case ISD::SETOGE:
    case ISD::SETGE: Swap = true; // Fallthrough
    case ISD::SETLE:
    case ISD::SETOLE: SSECC = 2; break;
    case ISD::SETUO:  SSECC = 3; break;
    case ISD::SETUNE:
    case ISD::SETNE:  SSECC = 4; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: SSECC = 5; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: SSECC = 6; break;
    case ISD::SETO:   SSECC = 7; break;
    }
    if (Swap)
      std::swap(Op0, Op1);

    // In the two special cases we can't handle, emit two comparisons.
    if (SSECC == 8) {
      if (SetCCOpcode == ISD::SETUEQ) {
        SDValue UNORD, EQ;
        UNORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
                            DAG.getConstant(3, MVT::i8));
        EQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
                         DAG.getConstant(0, MVT::i8));
        return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
      }
      if (SetCCOpcode == ISD::SETONE) {
        SDValue ORD, NEQ;
        ORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
                          DAG.getConstant(7, MVT::i8));
        NEQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
                          DAG.getConstant(4, MVT::i8));
        return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
      }
      llvm_unreachable("Illegal FP comparison");
    }
    // Handle all other FP comparisons here.
    return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
                       DAG.getConstant(SSECC, MVT::i8));
  }

  // Break 256-bit integer vector compare into smaller ones.
  if (VT.is256BitVector() && !Subtarget->hasAVX2())
    return Lower256IntVSETCC(Op, DAG);

  // We are handling one of the integer comparisons here.  Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = 0;
  bool Swap = false, Invert = false, FlipSigns = false;

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETNE:  Invert = true;
  case ISD::SETEQ:  Opc = X86ISD::PCMPEQ; break;
  case ISD::SETLT:  Swap = true;
  case ISD::SETGT:  Opc = X86ISD::PCMPGT; break;
  case ISD::SETGE:  Swap = true;
  case ISD::SETLE:  Opc = X86ISD::PCMPGT; Invert = true; break;
  case ISD::SETULT: Swap = true;
  case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break;
  case ISD::SETUGE: Swap = true;
  case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  // Check that the operation in question is available (most are plain SSE2,
  // but PCMPGTQ and PCMPEQQ have different requirements).
  if (Opc == X86ISD::PCMPGT && VT == MVT::v2i64 && !Subtarget->hasSSE42())
    return SDValue();
  if (Opc == X86ISD::PCMPEQ && VT == MVT::v2i64 && !Subtarget->hasSSE41())
    return SDValue();

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    EVT EltVT = VT.getVectorElementType();
    SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
                                      EltVT);
    std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
    SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
                                  SignBits.size());
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
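// The FlipSigns trick: SSE only has signed compares (pcmpgt), but for any
// two N-bit values, x <u y  <=>  (x ^ 0x80...0) <s (y ^ 0x80...0); XOR-ing
// the sign bit maps unsigned order onto signed order, which is why the
// unsigned predicates above become sign-bit XORs followed by PCMPGT.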
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getNode()->getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD ||
       Opc == X86ISD::SUB ||
       Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB ||
       Opc == X86ISD::SMUL ||
       Opc == X86ISD::UMUL ||
       Opc == X86ISD::INC ||
       Opc == X86ISD::DEC ||
       Opc == X86ISD::OR ||
       Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}

static bool isZero(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  return C && C->isNullValue();
}

static bool isAllOnes(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  return C && C->isAllOnesValue();
}

static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Cond  = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  DebugLoc DL = Op.getDebugLoc();
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == X86ISD::SETCC &&
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isZero(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnes(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(0);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
        if (YC->isNullValue() &&
            (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
          SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
          SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
                                    DAG.getConstant(0, CmpOp0.getValueType()),
                                    CmpOp0);
          SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                    DAG.getConstant(X86::COND_B, MVT::i8),
                                    SDValue(Neg.getNode(), 1));
          return Res;
        }

      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                        CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
      Cmp = ConvertCmpIfNecessary(Cmp, DAG);

      SDValue Res =   // Res = 0 or -1.
        DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                    DAG.getConstant(X86::COND_B, MVT::i8), Cmp);

      if (isAllOnes(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      if (N2C == 0 || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    EVT VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
              Cond.getOperand(0).getValueType() != MVT::i8)) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == X86ISD::SUB) {
    Cond = ConvertCmpIfNecessary(Cond, DAG);
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
      SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, MVT::i8), Cond);
      if (isAllOnes(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
}
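// The "neg & sbb" idiom used above, concretely, for (x != 0) ? -1 : 0:
//
//   negl %ecx            // CF = (x != 0)
//   sbbl %eax, %eax      // eax = eax - eax - CF = 0 or -1
//
// SETCC_CARRY with COND_B models the sbb, producing the all-ones/zero mask
// without a branch or a separate setcc+sign-extend.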
// isAndOrOfSetCCs - Return true if node is an ISD::AND or
// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
// from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}

// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
// 1 and that the SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub,mul}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isa<ConstantSDNode>(Cond.getOperand(1)) &&
        cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO ||
         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      SDValue NewCond = LowerSETCC(Cond, DAG);
      if (NewCond.getNode())
        Cond = NewCond;
    }
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD ||
           Cond.getOpcode() == X86ISD::SUB ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
      ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
       Cond.getOperand(0).getValueType() != MVT::i8)) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          X86::CondCode CCode =
            (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = X86::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                                Chain, Dest, CC, Cmp);
            X86::CondCode CCode =
              (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = X86::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, MVT::i8);
            Cond = Cmp;
            addTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
      // It should be transformed by the DAG combiner, except when the
      // condition is set by an arithmetic-with-overflow node.
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      addTest = false;
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
      // For FCMP_OEQ, we can emit
      // two branches instead of an explicit AND instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_OEQ.
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;

          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                    Cond.getOperand(0), Cond.getOperand(1));
          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
          CC = DAG.getConstant(X86::COND_NE, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = DAG.getConstant(X86::COND_P, MVT::i8);
          Cond = Cmp;
          addTest = false;
        }
      }
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
      // For FCMP_UNE, we can emit
      // two branches instead of an explicit AND instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_UNE.
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;

          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                    Cond.getOperand(0), Cond.getOperand(1));
          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
          CC = DAG.getConstant(X86::COND_NE, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = DAG.getConstant(X86::COND_NP, MVT::i8);
          Cond = Cmp;
          addTest = false;
          Dest = FalseBB;
        }
      }
    }
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }
  Cond = ConvertCmpIfNecessary(Cond, DAG);
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() ||
          getTargetMachine().Options.EnableSegmentedStacks) &&
         "This should be used only on Windows targets or when segmented stacks "
         "are being used");
  assert(!Subtarget->isTargetEnvMacho() && "Not implemented");
  DebugLoc dl = Op.getDebugLoc();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  bool Is64Bit = Subtarget->is64Bit();
  EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32;

  if (getTargetMachine().Options.EnableSegmentedStacks) {
    MachineFunction &MF = DAG.getMachineFunction();
    MachineRegisterInfo &MRI = MF.getRegInfo();

    if (Is64Bit) {
      // The 64-bit implementation of segmented stacks needs to clobber both
      // r10 and r11. This makes it impossible to use it along with nested
      // parameters.
      const Function *F = MF.getFunction();

      for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
           I != E; ++I)
        if (I->hasNestAttr())
          report_fatal_error("Cannot use segmented stacks with functions that "
                             "have nested arguments.");
    }

    const TargetRegisterClass *AddrRegClass =
      getRegClassFor(Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
    unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
    SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
                                DAG.getRegister(Vreg, SPTy));
    SDValue Ops1[2] = { Value, Chain };
    return DAG.getMergeValues(Ops1, 2, dl);
  } else {
    SDValue Flag;
    unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX);

    Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
    Flag = Chain.getValue(1);
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

    Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
    Flag = Chain.getValue(1);

    Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);

    SDValue Ops1[2] = { Chain.getValue(0), Chain };
    return DAG.getMergeValues(Ops1, 2, dl);
  }
}

SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters coming in memory).
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
                               FIN, MachinePointerInfo(SV), false, false, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                       MVT::i32),
                       FIN, MachinePointerInfo(SV, 4), false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
                       MachinePointerInfo(SV, 8),
                       false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
9329 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9330 FIN, DAG.getIntPtrConstant(8)); 9331 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 9332 getPointerTy()); 9333 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 9334 MachinePointerInfo(SV, 16), false, false, 0); 9335 MemOps.push_back(Store); 9336 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 9337 &MemOps[0], MemOps.size()); 9338} 9339 9340SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 9341 assert(Subtarget->is64Bit() && 9342 "LowerVAARG only handles 64-bit va_arg!"); 9343 assert((Subtarget->isTargetLinux() || 9344 Subtarget->isTargetDarwin()) && 9345 "Unhandled target in LowerVAARG"); 9346 assert(Op.getNode()->getNumOperands() == 4); 9347 SDValue Chain = Op.getOperand(0); 9348 SDValue SrcPtr = Op.getOperand(1); 9349 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9350 unsigned Align = Op.getConstantOperandVal(3); 9351 DebugLoc dl = Op.getDebugLoc(); 9352 9353 EVT ArgVT = Op.getNode()->getValueType(0); 9354 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9355 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 9356 uint8_t ArgMode; 9357 9358 // Decide which area this value should be read from. 9359 // TODO: Implement the AMD64 ABI in its entirety. This simple 9360 // selection mechanism works only for the basic types. 9361 if (ArgVT == MVT::f80) { 9362 llvm_unreachable("va_arg for f80 not yet implemented"); 9363 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 9364 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 9365 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 9366 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 9367 } else { 9368 llvm_unreachable("Unhandled argument type in LowerVAARG"); 9369 } 9370 9371 if (ArgMode == 2) { 9372 // Sanity Check: Make sure using fp_offset makes sense. 9373 assert(!getTargetMachine().Options.UseSoftFloat && 9374 !(DAG.getMachineFunction() 9375 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 9376 Subtarget->hasSSE1()); 9377 } 9378 9379 // Insert VAARG_64 node into the DAG 9380 // VAARG_64 returns two values: Variable Argument Address, Chain 9381 SmallVector<SDValue, 11> InstOps; 9382 InstOps.push_back(Chain); 9383 InstOps.push_back(SrcPtr); 9384 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 9385 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 9386 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 9387 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 9388 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 9389 VTs, &InstOps[0], InstOps.size(), 9390 MVT::i64, 9391 MachinePointerInfo(SV), 9392 /*Align=*/0, 9393 /*Volatile=*/false, 9394 /*ReadMem=*/true, 9395 /*WriteMem=*/true); 9396 Chain = VAARG.getValue(1); 9397 9398 // Load the next argument and return it 9399 return DAG.getLoad(ArgVT, dl, 9400 Chain, 9401 VAARG, 9402 MachinePointerInfo(), 9403 false, false, false, 0); 9404} 9405 9406SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 9407 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
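  // For reference, a sketch of that layout following the System V x86-64
  // ABI (these field names do not appear in this file):
  //   struct __va_list_tag {
  //     i32 gp_offset;          // offset into reg_save_area for next GPR arg
  //     i32 fp_offset;          // offset into reg_save_area for next XMM arg
  //     i8 *overflow_arg_area;  // arguments passed on the stack
  //     i8 *reg_save_area;      // register save area
  //   };
  // i.e. 4 + 4 + 8 + 8 = 24 bytes, which is why the memcpy below copies
  // exactly 24 bytes with 8-byte alignment.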
  assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

// getTargetVShiftNode - Handle vector element shifts where the shift amount
// may or may not be a constant. Takes the immediate version of the shift as
// input.
static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
                                   SDValue SrcOp, SDValue ShAmt,
                                   SelectionDAG &DAG) {
  assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");

  if (isa<ConstantSDNode>(ShAmt)) {
    // Constant may be a TargetConstant. Use a regular constant.
    uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    switch (Opc) {
    default: llvm_unreachable("Unknown target vector shift node");
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
    case X86ISD::VSRAI:
      return DAG.getNode(Opc, dl, VT, SrcOp,
                         DAG.getConstant(ShiftAmt, MVT::i32));
    }
  }

  // Change opcode to the non-immediate version.
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
  case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
  case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }

  // Need to build a vector containing the shift amount.
  // The shift amount is 32 bits, but the SSE instructions read 64 bits,
  // so fill the second element with 0.
  SDValue ShOps[4];
  ShOps[0] = ShAmt;
  ShOps[1] = DAG.getConstant(0, MVT::i32);
  ShOps[2] = DAG.getUNDEF(MVT::i32);
  ShOps[3] = DAG.getUNDEF(MVT::i32);
  ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4);

  // The return type has to be a 128-bit type with the same element
  // type as the input type.
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());

  ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}

SDValue
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
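  // As an illustrative sketch (not part of the original source), a call
  // such as
  //   %r = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a, <4 x float> %b)
  // is lowered to an X86ISD::COMI node comparing %a and %b, an
  // X86ISD::SETCC reading the resulting EFLAGS, and a zero-extension of
  // the i8 flag back to the i32 the intrinsic returns.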
9475 case Intrinsic::x86_sse_comieq_ss: 9476 case Intrinsic::x86_sse_comilt_ss: 9477 case Intrinsic::x86_sse_comile_ss: 9478 case Intrinsic::x86_sse_comigt_ss: 9479 case Intrinsic::x86_sse_comige_ss: 9480 case Intrinsic::x86_sse_comineq_ss: 9481 case Intrinsic::x86_sse_ucomieq_ss: 9482 case Intrinsic::x86_sse_ucomilt_ss: 9483 case Intrinsic::x86_sse_ucomile_ss: 9484 case Intrinsic::x86_sse_ucomigt_ss: 9485 case Intrinsic::x86_sse_ucomige_ss: 9486 case Intrinsic::x86_sse_ucomineq_ss: 9487 case Intrinsic::x86_sse2_comieq_sd: 9488 case Intrinsic::x86_sse2_comilt_sd: 9489 case Intrinsic::x86_sse2_comile_sd: 9490 case Intrinsic::x86_sse2_comigt_sd: 9491 case Intrinsic::x86_sse2_comige_sd: 9492 case Intrinsic::x86_sse2_comineq_sd: 9493 case Intrinsic::x86_sse2_ucomieq_sd: 9494 case Intrinsic::x86_sse2_ucomilt_sd: 9495 case Intrinsic::x86_sse2_ucomile_sd: 9496 case Intrinsic::x86_sse2_ucomigt_sd: 9497 case Intrinsic::x86_sse2_ucomige_sd: 9498 case Intrinsic::x86_sse2_ucomineq_sd: { 9499 unsigned Opc = 0; 9500 ISD::CondCode CC = ISD::SETCC_INVALID; 9501 switch (IntNo) { 9502 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 9503 case Intrinsic::x86_sse_comieq_ss: 9504 case Intrinsic::x86_sse2_comieq_sd: 9505 Opc = X86ISD::COMI; 9506 CC = ISD::SETEQ; 9507 break; 9508 case Intrinsic::x86_sse_comilt_ss: 9509 case Intrinsic::x86_sse2_comilt_sd: 9510 Opc = X86ISD::COMI; 9511 CC = ISD::SETLT; 9512 break; 9513 case Intrinsic::x86_sse_comile_ss: 9514 case Intrinsic::x86_sse2_comile_sd: 9515 Opc = X86ISD::COMI; 9516 CC = ISD::SETLE; 9517 break; 9518 case Intrinsic::x86_sse_comigt_ss: 9519 case Intrinsic::x86_sse2_comigt_sd: 9520 Opc = X86ISD::COMI; 9521 CC = ISD::SETGT; 9522 break; 9523 case Intrinsic::x86_sse_comige_ss: 9524 case Intrinsic::x86_sse2_comige_sd: 9525 Opc = X86ISD::COMI; 9526 CC = ISD::SETGE; 9527 break; 9528 case Intrinsic::x86_sse_comineq_ss: 9529 case Intrinsic::x86_sse2_comineq_sd: 9530 Opc = X86ISD::COMI; 9531 CC = ISD::SETNE; 9532 break; 9533 case Intrinsic::x86_sse_ucomieq_ss: 9534 case Intrinsic::x86_sse2_ucomieq_sd: 9535 Opc = X86ISD::UCOMI; 9536 CC = ISD::SETEQ; 9537 break; 9538 case Intrinsic::x86_sse_ucomilt_ss: 9539 case Intrinsic::x86_sse2_ucomilt_sd: 9540 Opc = X86ISD::UCOMI; 9541 CC = ISD::SETLT; 9542 break; 9543 case Intrinsic::x86_sse_ucomile_ss: 9544 case Intrinsic::x86_sse2_ucomile_sd: 9545 Opc = X86ISD::UCOMI; 9546 CC = ISD::SETLE; 9547 break; 9548 case Intrinsic::x86_sse_ucomigt_ss: 9549 case Intrinsic::x86_sse2_ucomigt_sd: 9550 Opc = X86ISD::UCOMI; 9551 CC = ISD::SETGT; 9552 break; 9553 case Intrinsic::x86_sse_ucomige_ss: 9554 case Intrinsic::x86_sse2_ucomige_sd: 9555 Opc = X86ISD::UCOMI; 9556 CC = ISD::SETGE; 9557 break; 9558 case Intrinsic::x86_sse_ucomineq_ss: 9559 case Intrinsic::x86_sse2_ucomineq_sd: 9560 Opc = X86ISD::UCOMI; 9561 CC = ISD::SETNE; 9562 break; 9563 } 9564 9565 SDValue LHS = Op.getOperand(1); 9566 SDValue RHS = Op.getOperand(2); 9567 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 9568 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 9569 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 9570 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9571 DAG.getConstant(X86CC, MVT::i8), Cond); 9572 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9573 } 9574 // Arithmetic intrinsics. 
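  // For instance (sketch only):
  //   %r = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a, <4 x i32> %b)
  // maps directly onto a single X86ISD::PMULUDQ node; the horizontal
  // add/sub intrinsics below map the same way onto FHADD/FHSUB/HADD/HSUB.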
  case Intrinsic::x86_sse2_pmulu_dq:
  case Intrinsic::x86_avx2_pmulu_dq:
    return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_sse3_hadd_ps:
  case Intrinsic::x86_sse3_hadd_pd:
  case Intrinsic::x86_avx_hadd_ps_256:
  case Intrinsic::x86_avx_hadd_pd_256:
    return DAG.getNode(X86ISD::FHADD, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_sse3_hsub_ps:
  case Intrinsic::x86_sse3_hsub_pd:
  case Intrinsic::x86_avx_hsub_ps_256:
  case Intrinsic::x86_avx_hsub_pd_256:
    return DAG.getNode(X86ISD::FHSUB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_phadd_w_128:
  case Intrinsic::x86_ssse3_phadd_d_128:
  case Intrinsic::x86_avx2_phadd_w:
  case Intrinsic::x86_avx2_phadd_d:
    return DAG.getNode(X86ISD::HADD, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_phsub_w_128:
  case Intrinsic::x86_ssse3_phsub_d_128:
  case Intrinsic::x86_avx2_phsub_w:
  case Intrinsic::x86_avx2_phsub_d:
    return DAG.getNode(X86ISD::HSUB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q_256:
    return DAG.getNode(ISD::SHL, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q_256:
    return DAG.getNode(ISD::SRL, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
    return DAG.getNode(ISD::SRA, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
    return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_ssse3_psign_b_128:
  case Intrinsic::x86_ssse3_psign_w_128:
  case Intrinsic::x86_ssse3_psign_d_128:
  case Intrinsic::x86_avx2_psign_b:
  case Intrinsic::x86_avx2_psign_w:
  case Intrinsic::x86_avx2_psign_d:
    return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::x86_sse41_insertps:
    return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps:
    // Operands intentionally swapped. Mask is last operand to intrinsic,
    // but second operand for node/instruction.
    return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(1));

  // ptest and testp intrinsics. The intrinsics these come from are designed
  // to return an integer value, not just an instruction, so lower them to
  // the ptest or testp pattern and a setcc for the result.
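  // For example (sketch only), the SSE4.1 ptestz intrinsic
  //   %r = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a, <2 x i64> %b)
  // becomes PTEST %a, %b followed by a SETCC on COND_E: %r is 1 exactly
  // when (a & b) == 0, as reported by ZF.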
9650 case Intrinsic::x86_sse41_ptestz: 9651 case Intrinsic::x86_sse41_ptestc: 9652 case Intrinsic::x86_sse41_ptestnzc: 9653 case Intrinsic::x86_avx_ptestz_256: 9654 case Intrinsic::x86_avx_ptestc_256: 9655 case Intrinsic::x86_avx_ptestnzc_256: 9656 case Intrinsic::x86_avx_vtestz_ps: 9657 case Intrinsic::x86_avx_vtestc_ps: 9658 case Intrinsic::x86_avx_vtestnzc_ps: 9659 case Intrinsic::x86_avx_vtestz_pd: 9660 case Intrinsic::x86_avx_vtestc_pd: 9661 case Intrinsic::x86_avx_vtestnzc_pd: 9662 case Intrinsic::x86_avx_vtestz_ps_256: 9663 case Intrinsic::x86_avx_vtestc_ps_256: 9664 case Intrinsic::x86_avx_vtestnzc_ps_256: 9665 case Intrinsic::x86_avx_vtestz_pd_256: 9666 case Intrinsic::x86_avx_vtestc_pd_256: 9667 case Intrinsic::x86_avx_vtestnzc_pd_256: { 9668 bool IsTestPacked = false; 9669 unsigned X86CC = 0; 9670 switch (IntNo) { 9671 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 9672 case Intrinsic::x86_avx_vtestz_ps: 9673 case Intrinsic::x86_avx_vtestz_pd: 9674 case Intrinsic::x86_avx_vtestz_ps_256: 9675 case Intrinsic::x86_avx_vtestz_pd_256: 9676 IsTestPacked = true; // Fallthrough 9677 case Intrinsic::x86_sse41_ptestz: 9678 case Intrinsic::x86_avx_ptestz_256: 9679 // ZF = 1 9680 X86CC = X86::COND_E; 9681 break; 9682 case Intrinsic::x86_avx_vtestc_ps: 9683 case Intrinsic::x86_avx_vtestc_pd: 9684 case Intrinsic::x86_avx_vtestc_ps_256: 9685 case Intrinsic::x86_avx_vtestc_pd_256: 9686 IsTestPacked = true; // Fallthrough 9687 case Intrinsic::x86_sse41_ptestc: 9688 case Intrinsic::x86_avx_ptestc_256: 9689 // CF = 1 9690 X86CC = X86::COND_B; 9691 break; 9692 case Intrinsic::x86_avx_vtestnzc_ps: 9693 case Intrinsic::x86_avx_vtestnzc_pd: 9694 case Intrinsic::x86_avx_vtestnzc_ps_256: 9695 case Intrinsic::x86_avx_vtestnzc_pd_256: 9696 IsTestPacked = true; // Fallthrough 9697 case Intrinsic::x86_sse41_ptestnzc: 9698 case Intrinsic::x86_avx_ptestnzc_256: 9699 // ZF and CF = 0 9700 X86CC = X86::COND_A; 9701 break; 9702 } 9703 9704 SDValue LHS = Op.getOperand(1); 9705 SDValue RHS = Op.getOperand(2); 9706 unsigned TestOpc = IsTestPacked ? 
X86ISD::TESTP : X86ISD::PTEST; 9707 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 9708 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 9709 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 9710 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9711 } 9712 9713 // SSE/AVX shift intrinsics 9714 case Intrinsic::x86_sse2_psll_w: 9715 case Intrinsic::x86_sse2_psll_d: 9716 case Intrinsic::x86_sse2_psll_q: 9717 case Intrinsic::x86_avx2_psll_w: 9718 case Intrinsic::x86_avx2_psll_d: 9719 case Intrinsic::x86_avx2_psll_q: 9720 return DAG.getNode(X86ISD::VSHL, dl, Op.getValueType(), 9721 Op.getOperand(1), Op.getOperand(2)); 9722 case Intrinsic::x86_sse2_psrl_w: 9723 case Intrinsic::x86_sse2_psrl_d: 9724 case Intrinsic::x86_sse2_psrl_q: 9725 case Intrinsic::x86_avx2_psrl_w: 9726 case Intrinsic::x86_avx2_psrl_d: 9727 case Intrinsic::x86_avx2_psrl_q: 9728 return DAG.getNode(X86ISD::VSRL, dl, Op.getValueType(), 9729 Op.getOperand(1), Op.getOperand(2)); 9730 case Intrinsic::x86_sse2_psra_w: 9731 case Intrinsic::x86_sse2_psra_d: 9732 case Intrinsic::x86_avx2_psra_w: 9733 case Intrinsic::x86_avx2_psra_d: 9734 return DAG.getNode(X86ISD::VSRA, dl, Op.getValueType(), 9735 Op.getOperand(1), Op.getOperand(2)); 9736 case Intrinsic::x86_sse2_pslli_w: 9737 case Intrinsic::x86_sse2_pslli_d: 9738 case Intrinsic::x86_sse2_pslli_q: 9739 case Intrinsic::x86_avx2_pslli_w: 9740 case Intrinsic::x86_avx2_pslli_d: 9741 case Intrinsic::x86_avx2_pslli_q: 9742 return getTargetVShiftNode(X86ISD::VSHLI, dl, Op.getValueType(), 9743 Op.getOperand(1), Op.getOperand(2), DAG); 9744 case Intrinsic::x86_sse2_psrli_w: 9745 case Intrinsic::x86_sse2_psrli_d: 9746 case Intrinsic::x86_sse2_psrli_q: 9747 case Intrinsic::x86_avx2_psrli_w: 9748 case Intrinsic::x86_avx2_psrli_d: 9749 case Intrinsic::x86_avx2_psrli_q: 9750 return getTargetVShiftNode(X86ISD::VSRLI, dl, Op.getValueType(), 9751 Op.getOperand(1), Op.getOperand(2), DAG); 9752 case Intrinsic::x86_sse2_psrai_w: 9753 case Intrinsic::x86_sse2_psrai_d: 9754 case Intrinsic::x86_avx2_psrai_w: 9755 case Intrinsic::x86_avx2_psrai_d: 9756 return getTargetVShiftNode(X86ISD::VSRAI, dl, Op.getValueType(), 9757 Op.getOperand(1), Op.getOperand(2), DAG); 9758 // Fix vector shift instructions where the last operand is a non-immediate 9759 // i32 value. 
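  // Sketch of the rewrite done below (no extra functionality): a
  // non-constant-amount call such as
  //   %r = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %a, i32 %n)
  // is re-emitted as the register form @llvm.x86.mmx.psll.w, with %n
  // widened to a 64-bit operand whose upper 32 bits are zero.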
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDValue ShAmt = Op.getOperand(2);
    if (isa<ConstantSDNode>(ShAmt))
      return SDValue();

    unsigned NewIntNo = 0;
    switch (IntNo) {
    case Intrinsic::x86_mmx_pslli_w:
      NewIntNo = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntNo = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntNo = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntNo = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntNo = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntNo = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntNo = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntNo = Intrinsic::x86_mmx_psra_d;
      break;
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    }

    // The vector shift intrinsics with scalars use 32-bit shift amounts but
    // the sse2/mmx shift instructions read 64 bits. Set the upper 32 bits
    // to zero.
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, ShAmt,
                        DAG.getConstant(0, MVT::i32));
    // FIXME: this must be lowered to get rid of the invalid type.

    EVT VT = Op.getValueType();
    ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(NewIntNo, MVT::i32),
                       Op.getOperand(1), ShAmt);
  }
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
9828 case Intrinsic::x86_sse42_pcmpistria128: 9829 Opcode = X86ISD::PCMPISTRI; 9830 X86CC = X86::COND_A; 9831 break; 9832 case Intrinsic::x86_sse42_pcmpestria128: 9833 Opcode = X86ISD::PCMPESTRI; 9834 X86CC = X86::COND_A; 9835 break; 9836 case Intrinsic::x86_sse42_pcmpistric128: 9837 Opcode = X86ISD::PCMPISTRI; 9838 X86CC = X86::COND_B; 9839 break; 9840 case Intrinsic::x86_sse42_pcmpestric128: 9841 Opcode = X86ISD::PCMPESTRI; 9842 X86CC = X86::COND_B; 9843 break; 9844 case Intrinsic::x86_sse42_pcmpistrio128: 9845 Opcode = X86ISD::PCMPISTRI; 9846 X86CC = X86::COND_O; 9847 break; 9848 case Intrinsic::x86_sse42_pcmpestrio128: 9849 Opcode = X86ISD::PCMPESTRI; 9850 X86CC = X86::COND_O; 9851 break; 9852 case Intrinsic::x86_sse42_pcmpistris128: 9853 Opcode = X86ISD::PCMPISTRI; 9854 X86CC = X86::COND_S; 9855 break; 9856 case Intrinsic::x86_sse42_pcmpestris128: 9857 Opcode = X86ISD::PCMPESTRI; 9858 X86CC = X86::COND_S; 9859 break; 9860 case Intrinsic::x86_sse42_pcmpistriz128: 9861 Opcode = X86ISD::PCMPISTRI; 9862 X86CC = X86::COND_E; 9863 break; 9864 case Intrinsic::x86_sse42_pcmpestriz128: 9865 Opcode = X86ISD::PCMPESTRI; 9866 X86CC = X86::COND_E; 9867 break; 9868 } 9869 SmallVector<SDValue, 5> NewOps; 9870 NewOps.append(Op->op_begin()+1, Op->op_end()); 9871 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 9872 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 9873 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9874 DAG.getConstant(X86CC, MVT::i8), 9875 SDValue(PCMP.getNode(), 1)); 9876 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9877 } 9878 case Intrinsic::x86_sse42_pcmpistri128: 9879 case Intrinsic::x86_sse42_pcmpestri128: { 9880 unsigned Opcode; 9881 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 9882 Opcode = X86ISD::PCMPISTRI; 9883 else 9884 Opcode = X86ISD::PCMPESTRI; 9885 9886 SmallVector<SDValue, 5> NewOps; 9887 NewOps.append(Op->op_begin()+1, Op->op_end()); 9888 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 9889 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 9890 } 9891 } 9892} 9893 9894SDValue 9895X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { 9896 DebugLoc dl = Op.getDebugLoc(); 9897 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 9898 switch (IntNo) { 9899 default: return SDValue(); // Don't custom lower most intrinsics. 9900 9901 // RDRAND intrinsics. 9902 case Intrinsic::x86_rdrand_16: 9903 case Intrinsic::x86_rdrand_32: 9904 case Intrinsic::x86_rdrand_64: { 9905 // Emit the node with the right value type. 9906 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 9907 SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0)); 9908 9909 // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise 9910 // return the value from Rand, which is always 0, casted to i32. 9911 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 9912 DAG.getConstant(1, Op->getValueType(1)), 9913 DAG.getConstant(X86::COND_B, MVT::i32), 9914 SDValue(Result.getNode(), 1) }; 9915 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 9916 DAG.getVTList(Op->getValueType(1), MVT::Glue), 9917 Ops, 4); 9918 9919 // Return { result, isValid, chain }. 
9920 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 9921 SDValue(Result.getNode(), 2)); 9922 } 9923 } 9924} 9925 9926SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 9927 SelectionDAG &DAG) const { 9928 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9929 MFI->setReturnAddressIsTaken(true); 9930 9931 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9932 DebugLoc dl = Op.getDebugLoc(); 9933 9934 if (Depth > 0) { 9935 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 9936 SDValue Offset = 9937 DAG.getConstant(TD->getPointerSize(), 9938 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 9939 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9940 DAG.getNode(ISD::ADD, dl, getPointerTy(), 9941 FrameAddr, Offset), 9942 MachinePointerInfo(), false, false, false, 0); 9943 } 9944 9945 // Just load the return address. 9946 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 9947 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9948 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 9949} 9950 9951SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 9952 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9953 MFI->setFrameAddressIsTaken(true); 9954 9955 EVT VT = Op.getValueType(); 9956 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 9957 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9958 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 9959 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 9960 while (Depth--) 9961 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 9962 MachinePointerInfo(), 9963 false, false, false, 0); 9964 return FrameAddr; 9965} 9966 9967SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 9968 SelectionDAG &DAG) const { 9969 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 9970} 9971 9972SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 9973 SDValue Chain = Op.getOperand(0); 9974 SDValue Offset = Op.getOperand(1); 9975 SDValue Handler = Op.getOperand(2); 9976 DebugLoc dl = Op.getDebugLoc(); 9977 9978 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 9979 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 9980 getPointerTy()); 9981 unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); 9982 9983 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 9984 DAG.getIntPtrConstant(TD->getPointerSize())); 9985 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 9986 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 9987 false, false, 0); 9988 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 9989 9990 return DAG.getNode(X86ISD::EH_RETURN, dl, 9991 MVT::Other, 9992 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 9993} 9994 9995SDValue X86TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 9996 SelectionDAG &DAG) const { 9997 return Op.getOperand(0); 9998} 9999 10000SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 10001 SelectionDAG &DAG) const { 10002 SDValue Root = Op.getOperand(0); 10003 SDValue Trmp = Op.getOperand(1); // trampoline 10004 SDValue FPtr = Op.getOperand(2); // nested function 10005 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 10006 DebugLoc dl = Op.getDebugLoc(); 10007 10008 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10009 10010 if (Subtarget->is64Bit()) { 10011 SDValue OutChains[6]; 10012 10013 // Large code-model. 10014 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 10015 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 10016 10017 const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10); 10018 const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11); 10019 10020 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 10021 10022 // Load the pointer to the nested function into R11. 10023 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 10024 SDValue Addr = Trmp; 10025 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10026 Addr, MachinePointerInfo(TrmpAddr), 10027 false, false, 0); 10028 10029 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10030 DAG.getConstant(2, MVT::i64)); 10031 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 10032 MachinePointerInfo(TrmpAddr, 2), 10033 false, false, 2); 10034 10035 // Load the 'nest' parameter value into R10. 10036 // R10 is specified in X86CallingConv.td 10037 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 10038 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10039 DAG.getConstant(10, MVT::i64)); 10040 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10041 Addr, MachinePointerInfo(TrmpAddr, 10), 10042 false, false, 0); 10043 10044 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10045 DAG.getConstant(12, MVT::i64)); 10046 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 10047 MachinePointerInfo(TrmpAddr, 12), 10048 false, false, 2); 10049 10050 // Jump to the nested function. 10051 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
10052 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10053 DAG.getConstant(20, MVT::i64)); 10054 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10055 Addr, MachinePointerInfo(TrmpAddr, 20), 10056 false, false, 0); 10057 10058 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 10059 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10060 DAG.getConstant(22, MVT::i64)); 10061 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 10062 MachinePointerInfo(TrmpAddr, 22), 10063 false, false, 0); 10064 10065 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 10066 } else { 10067 const Function *Func = 10068 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 10069 CallingConv::ID CC = Func->getCallingConv(); 10070 unsigned NestReg; 10071 10072 switch (CC) { 10073 default: 10074 llvm_unreachable("Unsupported calling convention"); 10075 case CallingConv::C: 10076 case CallingConv::X86_StdCall: { 10077 // Pass 'nest' parameter in ECX. 10078 // Must be kept in sync with X86CallingConv.td 10079 NestReg = X86::ECX; 10080 10081 // Check that ECX wasn't needed by an 'inreg' parameter. 10082 FunctionType *FTy = Func->getFunctionType(); 10083 const AttrListPtr &Attrs = Func->getAttributes(); 10084 10085 if (!Attrs.isEmpty() && !Func->isVarArg()) { 10086 unsigned InRegCount = 0; 10087 unsigned Idx = 1; 10088 10089 for (FunctionType::param_iterator I = FTy->param_begin(), 10090 E = FTy->param_end(); I != E; ++I, ++Idx) 10091 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 10092 // FIXME: should only count parameters that are lowered to integers. 10093 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 10094 10095 if (InRegCount > 2) { 10096 report_fatal_error("Nest register in use - reduce number of inreg" 10097 " parameters!"); 10098 } 10099 } 10100 break; 10101 } 10102 case CallingConv::X86_FastCall: 10103 case CallingConv::X86_ThisCall: 10104 case CallingConv::Fast: 10105 // Pass 'nest' parameter in EAX. 10106 // Must be kept in sync with X86CallingConv.td 10107 NestReg = X86::EAX; 10108 break; 10109 } 10110 10111 SDValue OutChains[4]; 10112 SDValue Addr, Disp; 10113 10114 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10115 DAG.getConstant(10, MVT::i32)); 10116 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 10117 10118 // This is storing the opcode for MOV32ri. 10119 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 10120 const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg); 10121 OutChains[0] = DAG.getStore(Root, dl, 10122 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 10123 Trmp, MachinePointerInfo(TrmpAddr), 10124 false, false, 0); 10125 10126 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10127 DAG.getConstant(1, MVT::i32)); 10128 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 10129 MachinePointerInfo(TrmpAddr, 1), 10130 false, false, 1); 10131 10132 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
10133 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10134 DAG.getConstant(5, MVT::i32)); 10135 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 10136 MachinePointerInfo(TrmpAddr, 5), 10137 false, false, 1); 10138 10139 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10140 DAG.getConstant(6, MVT::i32)); 10141 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 10142 MachinePointerInfo(TrmpAddr, 6), 10143 false, false, 1); 10144 10145 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 10146 } 10147} 10148 10149SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 10150 SelectionDAG &DAG) const { 10151 /* 10152 The rounding mode is in bits 11:10 of FPSR, and has the following 10153 settings: 10154 00 Round to nearest 10155 01 Round to -inf 10156 10 Round to +inf 10157 11 Round to 0 10158 10159 FLT_ROUNDS, on the other hand, expects the following: 10160 -1 Undefined 10161 0 Round to 0 10162 1 Round to nearest 10163 2 Round to +inf 10164 3 Round to -inf 10165 10166 To perform the conversion, we do: 10167 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 10168 */ 10169 10170 MachineFunction &MF = DAG.getMachineFunction(); 10171 const TargetMachine &TM = MF.getTarget(); 10172 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 10173 unsigned StackAlignment = TFI.getStackAlignment(); 10174 EVT VT = Op.getValueType(); 10175 DebugLoc DL = Op.getDebugLoc(); 10176 10177 // Save FP Control Word to stack slot 10178 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 10179 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 10180 10181 10182 MachineMemOperand *MMO = 10183 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 10184 MachineMemOperand::MOStore, 2, 2); 10185 10186 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 10187 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 10188 DAG.getVTList(MVT::Other), 10189 Ops, 2, MVT::i16, MMO); 10190 10191 // Load FP Control Word from stack slot 10192 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 10193 MachinePointerInfo(), false, false, false, 0); 10194 10195 // Transform as necessary 10196 SDValue CWD1 = 10197 DAG.getNode(ISD::SRL, DL, MVT::i16, 10198 DAG.getNode(ISD::AND, DL, MVT::i16, 10199 CWD, DAG.getConstant(0x800, MVT::i16)), 10200 DAG.getConstant(11, MVT::i8)); 10201 SDValue CWD2 = 10202 DAG.getNode(ISD::SRL, DL, MVT::i16, 10203 DAG.getNode(ISD::AND, DL, MVT::i16, 10204 CWD, DAG.getConstant(0x400, MVT::i16)), 10205 DAG.getConstant(9, MVT::i8)); 10206 10207 SDValue RetVal = 10208 DAG.getNode(ISD::AND, DL, MVT::i16, 10209 DAG.getNode(ISD::ADD, DL, MVT::i16, 10210 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 10211 DAG.getConstant(1, MVT::i16)), 10212 DAG.getConstant(3, MVT::i16)); 10213 10214 10215 return DAG.getNode((VT.getSizeInBits() < 16 ? 10216 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 10217} 10218 10219SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 10220 EVT VT = Op.getValueType(); 10221 EVT OpVT = VT; 10222 unsigned NumBits = VT.getSizeInBits(); 10223 DebugLoc dl = Op.getDebugLoc(); 10224 10225 Op = Op.getOperand(0); 10226 if (VT == MVT::i8) { 10227 // Zero extend to i32 since there is not an i8 bsr. 10228 OpVT = MVT::i32; 10229 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10230 } 10231 10232 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 
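  // (BSR returns the index of the highest set bit, so for a nonzero input
  // CTLZ is (NumBits-1) - BSR(x); since NumBits-1 is all ones here, that
  // subtraction is the single XOR emitted below.)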
10233 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10234 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10235 10236 // If src is zero (i.e. bsr sets ZF), returns NumBits. 10237 SDValue Ops[] = { 10238 Op, 10239 DAG.getConstant(NumBits+NumBits-1, OpVT), 10240 DAG.getConstant(X86::COND_E, MVT::i8), 10241 Op.getValue(1) 10242 }; 10243 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 10244 10245 // Finally xor with NumBits-1. 10246 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10247 10248 if (VT == MVT::i8) 10249 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10250 return Op; 10251} 10252 10253SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op, 10254 SelectionDAG &DAG) const { 10255 EVT VT = Op.getValueType(); 10256 EVT OpVT = VT; 10257 unsigned NumBits = VT.getSizeInBits(); 10258 DebugLoc dl = Op.getDebugLoc(); 10259 10260 Op = Op.getOperand(0); 10261 if (VT == MVT::i8) { 10262 // Zero extend to i32 since there is not an i8 bsr. 10263 OpVT = MVT::i32; 10264 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10265 } 10266 10267 // Issue a bsr (scan bits in reverse). 10268 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10269 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10270 10271 // And xor with NumBits-1. 10272 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10273 10274 if (VT == MVT::i8) 10275 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10276 return Op; 10277} 10278 10279SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 10280 EVT VT = Op.getValueType(); 10281 unsigned NumBits = VT.getSizeInBits(); 10282 DebugLoc dl = Op.getDebugLoc(); 10283 Op = Op.getOperand(0); 10284 10285 // Issue a bsf (scan bits forward) which also sets EFLAGS. 10286 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10287 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 10288 10289 // If src is zero (i.e. bsf sets ZF), returns NumBits. 10290 SDValue Ops[] = { 10291 Op, 10292 DAG.getConstant(NumBits, VT), 10293 DAG.getConstant(X86::COND_E, MVT::i8), 10294 Op.getValue(1) 10295 }; 10296 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 10297} 10298 10299// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 10300// ones, and then concatenate the result back. 
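// For example (illustration only): on a target without AVX2, a v8i32 add
// becomes two v4i32 adds on the extracted 128-bit halves, roughly
//   concat(add(lo(A), lo(B)), add(hi(A), hi(B))).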
10301static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 10302 EVT VT = Op.getValueType(); 10303 10304 assert(VT.is256BitVector() && VT.isInteger() && 10305 "Unsupported value type for operation"); 10306 10307 unsigned NumElems = VT.getVectorNumElements(); 10308 DebugLoc dl = Op.getDebugLoc(); 10309 10310 // Extract the LHS vectors 10311 SDValue LHS = Op.getOperand(0); 10312 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 10313 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 10314 10315 // Extract the RHS vectors 10316 SDValue RHS = Op.getOperand(1); 10317 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 10318 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 10319 10320 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10321 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10322 10323 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 10324 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 10325 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 10326} 10327 10328SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { 10329 assert(Op.getValueType().is256BitVector() && 10330 Op.getValueType().isInteger() && 10331 "Only handle AVX 256-bit vector integer operation"); 10332 return Lower256IntArith(Op, DAG); 10333} 10334 10335SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const { 10336 assert(Op.getValueType().is256BitVector() && 10337 Op.getValueType().isInteger() && 10338 "Only handle AVX 256-bit vector integer operation"); 10339 return Lower256IntArith(Op, DAG); 10340} 10341 10342SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10343 EVT VT = Op.getValueType(); 10344 10345 // Decompose 256-bit ops into smaller 128-bit ops. 10346 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 10347 return Lower256IntArith(Op, DAG); 10348 10349 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 10350 "Only know how to lower V2I64/V4I64 multiply"); 10351 10352 DebugLoc dl = Op.getDebugLoc(); 10353 10354 // Ahi = psrlqi(a, 32); 10355 // Bhi = psrlqi(b, 32); 10356 // 10357 // AloBlo = pmuludq(a, b); 10358 // AloBhi = pmuludq(a, Bhi); 10359 // AhiBlo = pmuludq(Ahi, b); 10360 10361 // AloBhi = psllqi(AloBhi, 32); 10362 // AhiBlo = psllqi(AhiBlo, 32); 10363 // return AloBlo + AloBhi + AhiBlo; 10364 10365 SDValue A = Op.getOperand(0); 10366 SDValue B = Op.getOperand(1); 10367 10368 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 10369 10370 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 10371 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 10372 10373 // Bit cast to 32-bit vectors for MULUDQ 10374 EVT MulVT = (VT == MVT::v2i64) ? 
MVT::v4i32 : MVT::v8i32; 10375 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 10376 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 10377 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 10378 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 10379 10380 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 10381 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 10382 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 10383 10384 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 10385 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 10386 10387 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 10388 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 10389} 10390 10391SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 10392 10393 EVT VT = Op.getValueType(); 10394 DebugLoc dl = Op.getDebugLoc(); 10395 SDValue R = Op.getOperand(0); 10396 SDValue Amt = Op.getOperand(1); 10397 LLVMContext *Context = DAG.getContext(); 10398 10399 if (!Subtarget->hasSSE2()) 10400 return SDValue(); 10401 10402 // Optimize shl/srl/sra with constant shift amount. 10403 if (isSplatVector(Amt.getNode())) { 10404 SDValue SclrAmt = Amt->getOperand(0); 10405 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 10406 uint64_t ShiftAmt = C->getZExtValue(); 10407 10408 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 10409 (Subtarget->hasAVX2() && 10410 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 10411 if (Op.getOpcode() == ISD::SHL) 10412 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 10413 DAG.getConstant(ShiftAmt, MVT::i32)); 10414 if (Op.getOpcode() == ISD::SRL) 10415 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 10416 DAG.getConstant(ShiftAmt, MVT::i32)); 10417 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 10418 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 10419 DAG.getConstant(ShiftAmt, MVT::i32)); 10420 } 10421 10422 if (VT == MVT::v16i8) { 10423 if (Op.getOpcode() == ISD::SHL) { 10424 // Make a large shift. 10425 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 10426 DAG.getConstant(ShiftAmt, MVT::i32)); 10427 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10428 // Zero out the rightmost bits. 10429 SmallVector<SDValue, 16> V(16, 10430 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10431 MVT::i8)); 10432 return DAG.getNode(ISD::AND, dl, VT, SHL, 10433 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10434 } 10435 if (Op.getOpcode() == ISD::SRL) { 10436 // Make a large shift. 10437 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 10438 DAG.getConstant(ShiftAmt, MVT::i32)); 10439 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10440 // Zero out the leftmost bits. 
10441 SmallVector<SDValue, 16> V(16, 10442 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10443 MVT::i8)); 10444 return DAG.getNode(ISD::AND, dl, VT, SRL, 10445 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10446 } 10447 if (Op.getOpcode() == ISD::SRA) { 10448 if (ShiftAmt == 7) { 10449 // R s>> 7 === R s< 0 10450 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 10451 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10452 } 10453 10454 // R s>> a === ((R u>> a) ^ m) - m 10455 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10456 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 10457 MVT::i8)); 10458 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 10459 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10460 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10461 return Res; 10462 } 10463 llvm_unreachable("Unknown shift opcode."); 10464 } 10465 10466 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 10467 if (Op.getOpcode() == ISD::SHL) { 10468 // Make a large shift. 10469 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 10470 DAG.getConstant(ShiftAmt, MVT::i32)); 10471 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 10472 // Zero out the rightmost bits. 10473 SmallVector<SDValue, 32> V(32, 10474 DAG.getConstant(uint8_t(-1U << ShiftAmt), 10475 MVT::i8)); 10476 return DAG.getNode(ISD::AND, dl, VT, SHL, 10477 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10478 } 10479 if (Op.getOpcode() == ISD::SRL) { 10480 // Make a large shift. 10481 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 10482 DAG.getConstant(ShiftAmt, MVT::i32)); 10483 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 10484 // Zero out the leftmost bits. 10485 SmallVector<SDValue, 32> V(32, 10486 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10487 MVT::i8)); 10488 return DAG.getNode(ISD::AND, dl, VT, SRL, 10489 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10490 } 10491 if (Op.getOpcode() == ISD::SRA) { 10492 if (ShiftAmt == 7) { 10493 // R s>> 7 === R s< 0 10494 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 10495 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 10496 } 10497 10498 // R s>> a === ((R u>> a) ^ m) - m 10499 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10500 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 10501 MVT::i8)); 10502 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 10503 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10504 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10505 return Res; 10506 } 10507 llvm_unreachable("Unknown shift opcode."); 10508 } 10509 } 10510 } 10511 10512 // Lower SHL with variable shift amount. 
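  // The v4i32 case below uses the classic float-exponent trick (a sketch of
  // the math, not additional code): shifting the amount left by 23 places it
  // in the IEEE single-precision exponent field, and adding 0x3f800000 (the
  // bit pattern of 1.0f) yields the float 2^amt in each lane; converting
  // back with FP_TO_SINT and multiplying by R computes R << amt.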

  // Lower SHL with variable shift amount.
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1),
                     DAG.getConstant(23, MVT::i32));

    const uint32_t CV[] = { 0x3f800000U, 0x3f800000U, 0x3f800000U, 0x3f800000U };
    Constant *C = ConstantDataVector::get(*Context, CV);
    SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
    SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                                 MachinePointerInfo::getConstantPool(),
                                 false, false, false, 16);

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
    Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
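
  // The v4i32 block above synthesizes 2^amt without a per-element shift:
  // (amt << 23) drops the amount into the IEEE-754 exponent field, and
  // adding the bias pattern 0x3f800000 (1.0f) yields the encoding of
  // 2^amt. For amt == 3: (3 << 23) + 0x3f800000 = 0x41000000 == 8.0f,
  // so the FP_TO_SINT/MUL pair computes x * 2^amt == x << amt per lane.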

  if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
    assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");

    // a = a << 5;
    Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1),
                     DAG.getConstant(5, MVT::i32));
    Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);

    // Turn 'a' into a mask suitable for VSELECT.
    SDValue VSelM = DAG.getConstant(0x80, VT);
    SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    SDValue CM1 = DAG.getConstant(0x0f, VT);
    SDValue CM2 = DAG.getConstant(0x3f, VT);

    // r = VSELECT(r, psllw(r & (char16)15, 4), a);
    SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
    M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
                            DAG.getConstant(4, MVT::i32), DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // r = VSELECT(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
    M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
                            DAG.getConstant(2, MVT::i32), DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // return VSELECT(r, r+r, a);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
                    DAG.getNode(ISD::ADD, dl, VT, R, R), R);
    return R;
  }
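
  // The v16i8 ladder above is a bit-serial shift. Byte shift amounts are
  // at most 7, so after 'a = a << 5' the three significant amount bits
  // sit at bit positions 7..5. Each round PCMPEQs the current top bit
  // into an all-ones/all-zeros VSELECT mask, conditionally applies a
  // shift by 4, then 2, then 1 (the last as R + R), and 'a += a' moves
  // the next amount bit into the sign position. Any shift of 0..7 is
  // thus assembled from at most three partial shifts.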

  // Decompose 256-bit shifts into smaller 128-bit shifts.
  if (VT.is256BitVector()) {
    unsigned NumElems = VT.getVectorNumElements();
    MVT EltVT = VT.getVectorElementType().getSimpleVT();
    EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Extract the two vectors.
    SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
    SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);

    // Recreate the shift amount vectors.
    SDValue Amt1, Amt2;
    if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
      // Constant shift amount.
      SmallVector<SDValue, 4> Amt1Csts;
      SmallVector<SDValue, 4> Amt2Csts;
      for (unsigned i = 0; i != NumElems/2; ++i)
        Amt1Csts.push_back(Amt->getOperand(i));
      for (unsigned i = NumElems/2; i != NumElems; ++i)
        Amt2Csts.push_back(Amt->getOperand(i));

      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
                         &Amt1Csts[0], NumElems/2);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
                         &Amt2Csts[0], NumElems/2);
    } else {
      // Variable shift amount.
      Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
      Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
    }

    // Issue new vector shifts for the smaller types.
    V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
    V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);

    // Concatenate the results back together.
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}

SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
  // Lower the "add/sub/mul with overflow" instruction into a regular
  // instruction plus a "setcc" instruction that checks the overflow flag.
  // The "brcond" lowering looks for this combo and may remove the "setcc"
  // instruction if the "setcc" has only one use.
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  unsigned Cond = 0;
  DebugLoc DL = Op.getDebugLoc();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::INC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::DEC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);

    SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                  DAG.getConstant(X86::COND_O, MVT::i32),
                  SDValue(Sum.getNode(), 2));

    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
  }
  }

  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC =
    DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
                DAG.getConstant(Cond, MVT::i32),
                SDValue(Sum.getNode(), 1));

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}

SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  EVT VT = Op.getValueType();

  if (!Subtarget->hasSSE2() || !VT.isVector())
    return SDValue();

  unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
                      ExtraVT.getScalarType().getSizeInBits();
  SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32);

  switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v8i32:
  case MVT::v16i16:
    if (!Subtarget->hasAVX())
      return SDValue();
    if (!Subtarget->hasAVX2()) {
      // Needs to be split into 128-bit halves.
      unsigned NumElems = VT.getVectorNumElements();

      // Extract the LHS vectors.
      SDValue LHS = Op.getOperand(0);
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

      MVT EltVT = VT.getVectorElementType().getSimpleVT();
      EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

      EVT ExtraEltVT = ExtraVT.getVectorElementType();
      unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
      ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
                                 ExtraNumElems/2);
      SDValue Extra = DAG.getValueType(ExtraVT);

      LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
      LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);

      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
    }
    // fall through
  case MVT::v4i32:
  case MVT::v8i16: {
    SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT,
                                       Op.getOperand(0), ShAmt, DAG);
    return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG);
  }
  }
}
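
// The VSHLI/VSRAI pair above is the usual in-register sign extension:
// e.g. sign_extend_inreg from i8 inside an i32 lane has BitsDiff == 24,
// so (x << 24) s>> 24 first moves bit 7 into the lane's sign position
// and then smears it back down across bits 31..8.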

SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op,
                                           SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  // Go ahead and emit the fence on x86-64 even if we asked for no-sse2.
  // There isn't any reason to disable it if the target processor supports it.
  if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) {
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32), // Base
      DAG.getTargetConstant(1, MVT::i8),   // Scale
      DAG.getRegister(0, MVT::i32),        // Index
      DAG.getTargetConstant(0, MVT::i32),  // Disp
      DAG.getRegister(0, MVT::i32),        // Segment.
      Zero,
      Chain
    };
    SDNode *Res =
      DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
                         array_lengthof(Ops));
    return SDValue(Res, 0);
  }

  unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
  if (!isDev)
    return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));

  unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
  unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();

  // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
  if (!Op1 && !Op2 && !Op3 && Op4)
    return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));

  // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
  if (Op1 && !Op2 && !Op3 && !Op4)
    return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));

  // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
  //           (MFENCE)>;
  return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
}
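
// Note: the OR32mrLocked sequence emitted above (and again in
// LowerATOMIC_FENCE below) is the classic pre-SSE2 fence idiom
// 'lock orl $0, (%esp)'. Any LOCK-prefixed read-modify-write acts as a
// full barrier, and the top of the stack is a convenient dummy location
// that is almost certainly already in the cache.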

SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op,
                                             SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
    // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
    // no-sse2). There isn't any reason to disable it if the target processor
    // supports it.
    if (Subtarget->hasSSE2() || Subtarget->is64Bit())
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));

    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32), // Base
      DAG.getTargetConstant(1, MVT::i8),   // Scale
      DAG.getRegister(0, MVT::i32),        // Index
      DAG.getTargetConstant(0, MVT::i32),  // Disp
      DAG.getRegister(0, MVT::i32),        // Segment.
      Zero,
      Chain
    };
    SDNode *Res =
      DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
                         array_lengthof(Ops));
    return SDValue(Res, 0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}


SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  EVT T = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();
  unsigned Reg = 0;
  unsigned size = 0;
  switch (T.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget->is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, 5, T, MMO);
  SDValue cpOut =
    DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  return cpOut;
}

SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                 SelectionDAG &DAG) const {
  assert(Subtarget->is64Bit() && "Result not type legalized?");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue TheChain = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
  SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
  SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
                                   rax.getValue(2));
  SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
                            DAG.getConstant(32, MVT::i8));
  SDValue Ops[] = {
    DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
    rdx.getValue(1)
  };
  return DAG.getMergeValues(Ops, 2, dl);
}
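
// RDTSC returns the timestamp counter split across EDX:EAX (in 64-bit
// mode the upper halves of RAX and RDX are cleared), which is why the
// i64 result above is reassembled as rax | (rdx << 32).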

SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
                                        SelectionDAG &DAG) const {
  EVT SrcVT = Op.getOperand(0).getValueType();
  EVT DstVT = Op.getValueType();
  assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
         Subtarget->hasMMX() && "Unexpected custom BITCAST");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
         "Unexpected custom BITCAST");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT==MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT==MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
  if (SrcVT.isVector() && DstVT.isVector())
    return Op;
  // All other conversions need to be expanded.
  return SDValue();
}

SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  EVT T = Node->getValueType(0);
  SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
                              DAG.getConstant(0, T), Node->getOperand(2));
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
                       cast<AtomicSDNode>(Node)->getMemoryVT(),
                       Node->getOperand(0),
                       Node->getOperand(1), negOp,
                       cast<AtomicSDNode>(Node)->getSrcValue(),
                       cast<AtomicSDNode>(Node)->getAlignment(),
                       cast<AtomicSDNode>(Node)->getOrdering(),
                       cast<AtomicSDNode>(Node)->getSynchScope());
}

static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();

  // Convert seq_cst store -> xchg
  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
  // FIXME: On 32-bit, store -> fist or movq would be more efficient
  //        (the only way to get a 16-byte store is cmpxchg16b).
  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
      !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                 cast<AtomicSDNode>(Node)->getMemoryVT(),
                                 Node->getOperand(0),
                                 Node->getOperand(1), Node->getOperand(2),
                                 cast<AtomicSDNode>(Node)->getMemOperand(),
                                 cast<AtomicSDNode>(Node)->getOrdering(),
                                 cast<AtomicSDNode>(Node)->getSynchScope());
    return Swap.getValue(1);
  }
  // Other atomic stores have a simple pattern.
  return Op;
}

static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getNode()->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid code");
  case ISD::ADDC: Opc = X86ISD::ADD; break;
  case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
  case ISD::SUBC: Opc = X86ISD::SUB; break;
  case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
                       Op.getOperand(1));
  return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
                     Op.getOperand(1), Op.getOperand(2));
}
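
// These carry-chain nodes are how multiword arithmetic reaches the
// flag-producing x86 forms: legalizing an i64 add on a 32-bit target
// produces ADDC on the low words and ADDE on the high words, which map
// here to an addl/adcl pair (X86ISD::ADD, then X86ISD::ADC consuming
// EFLAGS through the extra i32 result).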
10968/// 10969SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10970 switch (Op.getOpcode()) { 10971 default: llvm_unreachable("Should not custom lower this!"); 10972 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 10973 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 10974 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG); 10975 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 10976 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 10977 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 10978 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10979 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 10980 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10981 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10982 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10983 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 10984 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 10985 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10986 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10987 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10988 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10989 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 10990 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10991 case ISD::SHL_PARTS: 10992 case ISD::SRA_PARTS: 10993 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 10994 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 10995 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 10996 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 10997 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 10998 case ISD::FABS: return LowerFABS(Op, DAG); 10999 case ISD::FNEG: return LowerFNEG(Op, DAG); 11000 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 11001 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 11002 case ISD::SETCC: return LowerSETCC(Op, DAG); 11003 case ISD::SELECT: return LowerSELECT(Op, DAG); 11004 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 11005 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 11006 case ISD::VASTART: return LowerVASTART(Op, DAG); 11007 case ISD::VAARG: return LowerVAARG(Op, DAG); 11008 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 11009 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 11010 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 11011 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 11012 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 11013 case ISD::FRAME_TO_ARGS_OFFSET: 11014 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 11015 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 11016 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 11017 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 11018 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 11019 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 11020 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 11021 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 11022 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 11023 case ISD::MUL: return LowerMUL(Op, DAG); 11024 case ISD::SRA: 11025 case ISD::SRL: 11026 case ISD::SHL: return LowerShift(Op, DAG); 11027 case ISD::SADDO: 11028 case ISD::UADDO: 11029 case ISD::SSUBO: 11030 case ISD::USUBO: 11031 
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:              return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, DAG);
  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ADD:                return LowerADD(Op, DAG);
  case ISD::SUB:                return LowerSUB(Op, DAG);
  }
}

static void ReplaceATOMIC_LOAD(SDNode *Node,
                               SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) {
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();

  // Convert wide load -> cmpxchg8b/cmpxchg16b
  // FIXME: On 32-bit, load -> fild or movq would be more efficient
  //        (the only way to get a 16-byte load is cmpxchg16b).
  // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment.
  SDValue Zero = DAG.getConstant(0, VT);
  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT,
                               Node->getOperand(0),
                               Node->getOperand(1), Zero, Zero,
                               cast<AtomicSDNode>(Node)->getMemOperand(),
                               cast<AtomicSDNode>(Node)->getOrdering(),
                               cast<AtomicSDNode>(Node)->getSynchScope());
  Results.push_back(Swap.getValue(0));
  Results.push_back(Swap.getValue(1));
}

void X86TargetLowering::
ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
                        SelectionDAG &DAG, unsigned NewOp) const {
  DebugLoc dl = Node->getDebugLoc();
  assert(Node->getValueType(0) == MVT::i64 &&
         "Only know how to expand i64 atomics");

  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             Node->getOperand(2), DAG.getIntPtrConstant(0));
  SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             Node->getOperand(2), DAG.getIntPtrConstant(1));
  SDValue Ops[] = { Chain, In1, In2L, In2H };
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
  SDValue Result =
    DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64,
                            cast<MemSDNode>(Node)->getMemOperand());
  SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
  Results.push_back(Result.getValue(2));
}

/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    // We don't want to expand or promote these.
    return;
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;

    if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
      return;

    std::pair<SDValue,SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode() != 0) {
      EVT VT = N->getValueType(0);
      // Return a load from the stack slot.
      if (StackSlot.getNode() != 0)
        Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
                                      MachinePointerInfo(),
                                      false, false, false, 0));
      else
        Results.push_back(FIST);
    }
    return;
  }
  case ISD::READCYCLECOUNTER: {
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue TheChain = N->getOperand(0);
    SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
                                     rd.getValue(1));
    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
                                     eax.getValue(2));
    // Use a buildpair to merge the two 32-bit values into a 64-bit one.
    SDValue Ops[] = { eax, edx };
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2));
    Results.push_back(edx.getValue(1));
    return;
  }
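
  // The ATOMIC_CMP_SWAP expansion below relies on the fixed register
  // protocol of cmpxchg8b/cmpxchg16b: the expected value is passed in
  // EDX:EAX (RDX:RAX for i128), the replacement in ECX:EBX (RCX:RBX),
  // and on failure the current memory contents come back in EDX:EAX,
  // which is why both halves are copied through those registers around
  // the LCMPXCHG node.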
  case ISD::ATOMIC_CMP_SWAP: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
    bool Regs64bit = T == MVT::i128;
    EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, HalfT));
    swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
                               Regs64bit ? X86::RBX : X86::EBX,
                               swapInL, cpInH.getValue(1));
    swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
                               Regs64bit ? X86::RCX : X86::ECX,
                               swapInH, swapInL.getValue(1));
    SDValue Ops[] = { swapInH.getValue(0),
                      N->getOperand(1),
                      swapInH.getValue(1) };
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
                                  X86ISD::LCMPXCHG8_DAG;
    SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys,
                                             Ops, 3, T, MMO);
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0) };
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2));
    Results.push_back(cpOutH.getValue(1));
    return;
  }
  case ISD::ATOMIC_LOAD_ADD:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
    return;
  case ISD::ATOMIC_LOAD_AND:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
    return;
  case ISD::ATOMIC_LOAD_NAND:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
    return;
  case ISD::ATOMIC_LOAD_OR:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
    return;
  case ISD::ATOMIC_LOAD_SUB:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
    return;
  case ISD::ATOMIC_LOAD_XOR:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
    return;
  case ISD::ATOMIC_SWAP:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
    return;
  case ISD::ATOMIC_LOAD:
    ReplaceATOMIC_LOAD(N, Results, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd";
  case X86ISD::FSETCCss: return "X86ISD::FSETCCss";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::PSIGN: return "X86ISD::PSIGN";
  case X86ISD::BLENDV: return "X86ISD::BLENDV";
  case X86ISD::BLENDPW: return "X86ISD::BLENDPW";
  case X86ISD::BLENDPS: return "X86ISD::BLENDPS";
  case X86ISD::BLENDPD: return "X86ISD::BLENDPD";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
  case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG";
  case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG";
  case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG";
  case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
  case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::INC: return "X86ISD::INC";
  case X86ISD::DEC: return "X86ISD::DEC";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::ANDN: return "X86ISD::ANDN";
  case X86ISD::BLSI: return "X86ISD::BLSI";
  case X86ISD::BLSMSK: return "X86ISD::BLSMSK";
  case X86ISD::BLSR: return "X86ISD::BLSR";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::PALIGN: return "X86ISD::PALIGN";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
return "X86ISD::MOVDDUP"; 11326 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 11327 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 11328 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 11329 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 11330 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 11331 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 11332 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 11333 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 11334 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 11335 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 11336 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 11337 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 11338 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 11339 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 11340 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 11341 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 11342 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 11343 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 11344 case X86ISD::SAHF: return "X86ISD::SAHF"; 11345 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 11346 case X86ISD::FMADD: return "X86ISD::FMADD"; 11347 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 11348 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 11349 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 11350 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 11351 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 11352 } 11353} 11354 11355// isLegalAddressingMode - Return true if the addressing mode represented 11356// by AM is legal for this target, for a load/store of the specified type. 11357bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 11358 Type *Ty) const { 11359 // X86 supports extremely general addressing modes. 11360 CodeModel::Model M = getTargetMachine().getCodeModel(); 11361 Reloc::Model R = getTargetMachine().getRelocationModel(); 11362 11363 // X86 allows a sign-extended 32-bit immediate field as a displacement. 11364 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 11365 return false; 11366 11367 if (AM.BaseGV) { 11368 unsigned GVFlags = 11369 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 11370 11371 // If a reference to this global requires an extra load, we can't fold it. 11372 if (isGlobalStubReference(GVFlags)) 11373 return false; 11374 11375 // If BaseGV requires a register for the PIC base, we cannot also have a 11376 // BaseReg specified. 11377 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 11378 return false; 11379 11380 // If lower 4G is not available, then we must use rip-relative addressing. 11381 if ((M != CodeModel::Small || R != Reloc::Static) && 11382 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 11383 return false; 11384 } 11385 11386 switch (AM.Scale) { 11387 case 0: 11388 case 1: 11389 case 2: 11390 case 4: 11391 case 8: 11392 // These scales always work. 11393 break; 11394 case 3: 11395 case 5: 11396 case 9: 11397 // These scales are formed with basereg+scalereg. Only accept if there is 11398 // no basereg yet. 11399 if (AM.HasBaseReg) 11400 return false; 11401 break; 11402 default: // Other stuff never works. 
    return false;
  }

  return true;
}
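
// Scales of 3, 5 and 9 are accepted above because LEA can reuse the
// index register as the base, e.g. 'leal (%eax,%eax,2), %ecx' computes
// 3 * %eax. That encoding consumes the base slot, which is why these
// scales are rejected once the addressing mode already has a base
// register.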

bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm == (int32_t)Imm;
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return Imm == (int32_t)Imm;
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return false;

  // FIXME: pshufb, blends, shifts.
  return (VT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, VT) ||
          isSHUFPMask(M, VT, Subtarget->hasAVX()) ||
          isPSHUFDMask(M, VT) ||
          isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) ||
          isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) ||
          isPALIGNRMask(M, VT, Subtarget) ||
          isUNPCKLMask(M, VT, Subtarget->hasAVX2()) ||
          isUNPCKHMask(M, VT, Subtarget->hasAVX2()) ||
          isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) ||
          isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2()));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  unsigned NumElts = VT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && VT.is128BitVector()) {
    return (isMOVLMask(Mask, VT) ||
            isCommutedMOVLMask(Mask, VT, true) ||
            isSHUFPMask(Mask, VT, Subtarget->hasAVX()) ||
            isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpc,
                                                       unsigned immOpc,
                                                       unsigned LoadOpc,
                                                       unsigned CXchgOpc,
                                                       unsigned notOpc,
                                                       unsigned EAXreg,
                                                       const TargetRegisterClass *RC,
                                                       bool Invert) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld  t1 = [bitinstr.addr]
  //     op  t2 = t1, [bitinstr.val]
  //     not t3 = t2  (if Invert)
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t3  [EAX is implicit]
  //     bz  newMBB
  //     fallthrough --> nextMBB
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on the incoming instruction.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  DebugLoc dl = bInstr->getDebugLoc();
  MachineOperand& destOper = bInstr->getOperand(0);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  int numArgs = bInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &bInstr->getOperand(i+1);

  // An x86 address has 5 operands: base, scale, index, displacement,
  // and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
  MIB.addReg(t1);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  if (Invert)
    MIB = BuildMI(newMBB, dl, TII->get(notOpc), t3).addReg(t2);
  else
    t3 = t2;

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(EAXreg);

  // Insert the branch.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent(); // The pseudo instruction is gone now.
  return nextMBB;
}
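
// Roughly, for a 32-bit atomic AND the blocks built above boil down to
// the familiar compare-exchange retry loop (a sketch of the shape, not
// the verbatim output):
//
//   retry:
//     movl  (addr), %t1
//     movl  %t1, %t2
//     andl  val, %t2
//     movl  %t1, %eax
//     lock cmpxchgl %t2, (addr)  # succeeds iff (addr) still equals %eax
//     jne   retry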

// private utility function: 64 bit atomics on 32 bit host.
MachineBasicBlock *
X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpcL,
                                                       unsigned regOpcH,
                                                       unsigned immOpcL,
                                                       unsigned immOpcH,
                                                       bool Invert) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB (instructions are in pairs, except cmpxchg8b)
  //     ld t1,t2 = [bitinstr.addr]
  //   newMBB:
  //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
  //     op  t5, t6 <- out1, out2, [bitinstr.val]
  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
  //     not t7, t8 <- t5, t6  (if Invert)
  //     mov ECX, EBX <- t5, t6
  //     mov EAX, EDX <- t1, t2
  //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
  //     mov t3, t4 <- EAX, EDX
  //     bz  newMBB
  //     result in out1, out2
  //     fallthrough --> nextMBB

  const TargetRegisterClass *RC = &X86::GR32RegClass;
  const unsigned LoadOpc = X86::MOV32rm;
  const unsigned NotOpc = X86::NOT32r;
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = bInstr->getDebugLoc();
  // Insert instructions into newMBB based on the incoming instruction.
  // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
         "unexpected number of operands");
  MachineOperand& dest1Oper = bInstr->getOperand(0);
  MachineOperand& dest2Oper = bInstr->getOperand(1);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
    argOpers[i] = &bInstr->getOperand(i+2);

    // We use some of the operands multiple times, so conservatively just
    // clear any kill flags that might be present.
    if (argOpers[i]->isReg() && argOpers[i]->isUse())
      argOpers[i]->setIsKill(false);
  }

  // An x86 address has 5 operands: base, scale, index, displacement,
  // and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
  // Add 4 to the displacement to address the high half.
  for (int i=0; i <= lastAddrIndx-2; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
    newOp3.setImm(newOp3.getImm()+4);
  else
    newOp3.setOffset(newOp3.getOffset()+4);
  (*MIB).addOperand(newOp3);
  (*MIB).addOperand(*argOpers[lastAddrIndx]);
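
  // The +4 displacement above is the little-endian split of the i64
  // operand: the low 32 bits live at [addr] and the high 32 bits at
  // [addr+4], so t1/t2 load the two halves with otherwise identical
  // address operands.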

  // t3/4 are defined later, at the bottom of the loop.
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
    .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
    .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);

  // The subsequent operations should be using the destination registers of
  // the PHI instructions.
  t1 = dest1Oper.getReg();
  t2 = dest2Oper.getReg();

  int valArgIndx = lastAddrIndx + 1;
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
  if (regOpcL != X86::MOV32rr)
    MIB.addReg(t1);
  (*MIB).addOperand(*argOpers[valArgIndx]);
  assert(argOpers[valArgIndx + 1]->isReg() ==
         argOpers[valArgIndx]->isReg());
  assert(argOpers[valArgIndx + 1]->isImm() ==
         argOpers[valArgIndx]->isImm());
  if (argOpers[valArgIndx + 1]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
  if (regOpcH != X86::MOV32rr)
    MIB.addReg(t2);
  (*MIB).addOperand(*argOpers[valArgIndx + 1]);

  unsigned t7, t8;
  if (Invert) {
    t7 = F->getRegInfo().createVirtualRegister(RC);
    t8 = F->getRegInfo().createVirtualRegister(RC);
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t7).addReg(t5);
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t8).addReg(t6);
  } else {
    t7 = t5;
    t8 = t6;
  }

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
  MIB.addReg(t2);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
  MIB.addReg(t7);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
  MIB.addReg(t8);

  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
  MIB.addReg(X86::EAX);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
  MIB.addReg(X86::EDX);

  // Insert the branch.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent(); // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
                                                      MachineBasicBlock *MBB,
                                                      unsigned cmovOpc) const {
  // For the atomic min/max operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld t1 = [min/max.addr]
  //     mov t2 = [min/max.val]
  //     cmp t1, t2
  //     cmov[cond] t2 = t1
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz newMBB
  //     fallthrough --> nextMBB
  //
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(mInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = mInstr->getDebugLoc();
  // Insert instructions into newMBB based on the incoming instruction.
  assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  MachineOperand& destOper = mInstr->getOperand(0);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  int numArgs = mInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &mInstr->getOperand(i+1);

  // An x86 address has 5 operands: base, scale, index, displacement,
  // and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  unsigned t1 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // We only support register and immediate values.
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");

  unsigned t2 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
  MIB.addReg(t1);
  MIB.addReg(t2);

  // Generate the conditional move.
  unsigned t3 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
  MIB = BuildMI(newMBB, dl, TII->get(cmovOpc), t3);
  MIB.addReg(t2);
  MIB.addReg(t1);

  // Compare and exchange if no one has modified the memory location.
  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(mInstr->memoperands_begin(),
                    mInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(X86::EAX);

  // Insert the branch.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  mInstr->eraseFromParent(); // The pseudo instruction is gone now.
  return nextMBB;
}

// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
                            unsigned numArgs, bool memArg) const {
  assert(Subtarget->hasSSE42() &&
         "Target must have SSE4.2 or AVX features enabled");

  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  unsigned Opc;
  if (!Subtarget->hasAVX()) {
    if (memArg)
      Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
  } else {
    if (memArg)
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
  }

  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
  for (unsigned i = 0; i < numArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i+1);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));

  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
    .addReg(MI->getOperand(ValOps+1).getReg());

  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));

  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(
                   MachineInstr *MI,
                   MachineBasicBlock *MBB) const {
  // Emit the va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output  : destination address (reg)
  // 1-5) Input   : va_list address (addr, i64mem)
  // 6  ) ArgSize : Size (in bytes) of vararg type
  // 7  ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align   : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI->getDebugLoc();

  // struct va_list {
  //   i32 gp_offset
  //   i32 fp_offset
  //   i64 overflow_area (address)
  //   i64 reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  // Align ArgSize to a multiple of 8
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;   // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't need to alter
    // control flow, so no branch is created.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = NULL;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //        |    .
    //        |      .
    //   offsetMBB   overflowMBB
    //        |      .
    //        |    .
    //       endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   llvm::next(MachineBasicBlock::iterator(MI)),
                   thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);
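
    // For a typical 8-byte integer argument the bound below works out to
    // gp_offset >= 48, i.e. all six GPRs already taken; larger arguments
    // tighten the bound accordingly.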
    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Zero-extend the offset
    unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);
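
    // SUBREG_TO_REG with a 0 immediate asserts that the upper 32 bits of the
    // 64-bit result are already zero. That holds here because OffsetReg was
    // written by a 32-bit load, and every x86-64 write to a 32-bit register
    // implicitly clears the upper half.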

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .addReg(NextOffsetReg)
      .setMemRefs(MMOBegin, MMOEnd);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
      .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
    unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .addReg(NextAddrReg)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                                 MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
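  // (On the SysV AMD64 ABI the reg_save_area lays out the six integer
  // registers first, so VarArgsFPOffset is normally 48 and XMM slot i is
  // stored at reg_save_area + 48 + i*16, matching the loop below.)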
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
          MachineMemOperand::MOStore,
          /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}

// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(llvm::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }
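
  // At this point we either stopped at a def of EFLAGS or ran off the end
  // of the block without seeing another reader.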

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
  if (!MI->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
    copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
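  // The destination vreg is now defined solely by the PHI in sinkMBB, so
  // every later user spliced into sinkMBB still reads the right value.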
  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
                                        bool Is64Bit) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(getTargetMachine().Options.EnableSegmentedStacks);

  unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  unsigned TlsOffset = Is64Bit ? 0x70 : 0x30;

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
    getRegClassFor(Is64Bit ? MVT::i64 : MVT::i32);

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI->getOperand(1).getReg(),
           physSPReg = Is64Bit ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = BB;
  ++MBBIter;

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB; otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr : X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr : X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB);

  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
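  // __morestack_allocate_stack_space takes the size in RDI (64-bit) or on
  // the stack (32-bit) and hands back the new allocation in RAX/EAX, which
  // is copied into mallocPtrVReg below.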
  const uint32_t *RegMask =
    getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(12);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(Is64Bit ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI->getOperand(0).getReg())
    .addReg(mallocPtrVReg).addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg).addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI->eraseFromParent();

  // And we're done.
  return continueMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetEnvMacho());

  // The lowering is pretty easy: we're just emitting the call to _alloca. The
  // non-trivial part is impdef of ESP.

  if (Subtarget->isTargetWin64()) {
    if (Subtarget->isTargetCygMing()) {
      // ___chkstk(Mingw64):
      // Clobbers R10, R11, RAX and EFLAGS.
      // Updates RSP.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("___chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::RSP, RegState::Implicit)
        .addReg(X86::RAX, RegState::Define | RegState::Implicit)
        .addReg(X86::RSP, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // __chkstk(MSVCRT): does not update stack pointer.
      // Clobbers R10, R11 and EFLAGS.
      // FIXME: RAX(allocated size) might be reused and not killed.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("__chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
      // RAX has the offset to be subtracted from RSP.
      BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
        .addReg(X86::RSP)
        .addReg(X86::RAX);
    }
  } else {
    const char *StackProbeSymbol =
      Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
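
    // Either probe routine is expected to take the allocation size in EAX
    // and adjust ESP itself, which is why EAX and ESP appear below as both
    // implicit uses and implicit defs of the call.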
"_chkstk" : "_alloca"; 12538 12539 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 12540 .addExternalSymbol(StackProbeSymbol) 12541 .addReg(X86::EAX, RegState::Implicit) 12542 .addReg(X86::ESP, RegState::Implicit) 12543 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 12544 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 12545 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 12546 } 12547 12548 MI->eraseFromParent(); // The pseudo instruction is gone now. 12549 return BB; 12550} 12551 12552MachineBasicBlock * 12553X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 12554 MachineBasicBlock *BB) const { 12555 // This is pretty easy. We're taking the value that we received from 12556 // our load from the relocation, sticking it in either RDI (x86-64) 12557 // or EAX and doing an indirect call. The return value will then 12558 // be in the normal return register. 12559 const X86InstrInfo *TII 12560 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 12561 DebugLoc DL = MI->getDebugLoc(); 12562 MachineFunction *F = BB->getParent(); 12563 12564 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 12565 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 12566 12567 // Get a register mask for the lowered call. 12568 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 12569 // proper register mask. 12570 const uint32_t *RegMask = 12571 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 12572 if (Subtarget->is64Bit()) { 12573 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12574 TII->get(X86::MOV64rm), X86::RDI) 12575 .addReg(X86::RIP) 12576 .addImm(0).addReg(0) 12577 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12578 MI->getOperand(3).getTargetFlags()) 12579 .addReg(0); 12580 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 12581 addDirectMem(MIB, X86::RDI); 12582 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 12583 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 12584 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12585 TII->get(X86::MOV32rm), X86::EAX) 12586 .addReg(0) 12587 .addImm(0).addReg(0) 12588 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12589 MI->getOperand(3).getTargetFlags()) 12590 .addReg(0); 12591 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 12592 addDirectMem(MIB, X86::EAX); 12593 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 12594 } else { 12595 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 12596 TII->get(X86::MOV32rm), X86::EAX) 12597 .addReg(TII->getGlobalBaseReg(F)) 12598 .addImm(0).addReg(0) 12599 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 12600 MI->getOperand(3).getTargetFlags()) 12601 .addReg(0); 12602 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 12603 addDirectMem(MIB, X86::EAX); 12604 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 12605 } 12606 12607 MI->eraseFromParent(); // The pseudo instruction is gone now. 
12608 return BB; 12609} 12610 12611MachineBasicBlock * 12612X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 12613 MachineBasicBlock *BB) const { 12614 switch (MI->getOpcode()) { 12615 default: llvm_unreachable("Unexpected instr type to insert"); 12616 case X86::TAILJMPd64: 12617 case X86::TAILJMPr64: 12618 case X86::TAILJMPm64: 12619 llvm_unreachable("TAILJMP64 would not be touched here."); 12620 case X86::TCRETURNdi64: 12621 case X86::TCRETURNri64: 12622 case X86::TCRETURNmi64: 12623 return BB; 12624 case X86::WIN_ALLOCA: 12625 return EmitLoweredWinAlloca(MI, BB); 12626 case X86::SEG_ALLOCA_32: 12627 return EmitLoweredSegAlloca(MI, BB, false); 12628 case X86::SEG_ALLOCA_64: 12629 return EmitLoweredSegAlloca(MI, BB, true); 12630 case X86::TLSCall_32: 12631 case X86::TLSCall_64: 12632 return EmitLoweredTLSCall(MI, BB); 12633 case X86::CMOV_GR8: 12634 case X86::CMOV_FR32: 12635 case X86::CMOV_FR64: 12636 case X86::CMOV_V4F32: 12637 case X86::CMOV_V2F64: 12638 case X86::CMOV_V2I64: 12639 case X86::CMOV_V8F32: 12640 case X86::CMOV_V4F64: 12641 case X86::CMOV_V4I64: 12642 case X86::CMOV_GR16: 12643 case X86::CMOV_GR32: 12644 case X86::CMOV_RFP32: 12645 case X86::CMOV_RFP64: 12646 case X86::CMOV_RFP80: 12647 return EmitLoweredSelect(MI, BB); 12648 12649 case X86::FP32_TO_INT16_IN_MEM: 12650 case X86::FP32_TO_INT32_IN_MEM: 12651 case X86::FP32_TO_INT64_IN_MEM: 12652 case X86::FP64_TO_INT16_IN_MEM: 12653 case X86::FP64_TO_INT32_IN_MEM: 12654 case X86::FP64_TO_INT64_IN_MEM: 12655 case X86::FP80_TO_INT16_IN_MEM: 12656 case X86::FP80_TO_INT32_IN_MEM: 12657 case X86::FP80_TO_INT64_IN_MEM: { 12658 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12659 DebugLoc DL = MI->getDebugLoc(); 12660 12661 // Change the floating point control register to use "round towards zero" 12662 // mode when truncating to an integer value. 12663 MachineFunction *F = BB->getParent(); 12664 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 12665 addFrameReference(BuildMI(*BB, MI, DL, 12666 TII->get(X86::FNSTCW16m)), CWFrameIdx); 12667 12668 // Load the old value of the high byte of the control word... 12669 unsigned OldCW = 12670 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 12671 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 12672 CWFrameIdx); 12673 12674 // Set the high part to be round to zero... 12675 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 12676 .addImm(0xC7F); 12677 12678 // Reload the modified control word now... 12679 addFrameReference(BuildMI(*BB, MI, DL, 12680 TII->get(X86::FLDCW16m)), CWFrameIdx); 12681 12682 // Restore the memory image of control word to original value 12683 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 12684 .addReg(OldCW); 12685 12686 // Get the X86 opcode to use. 
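
    // 0xC7F keeps all floating-point exceptions masked and sets the
    // rounding-control field (bits 10-11) to 11b, round toward zero, which
    // is what C's float-to-integer truncation requires.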

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
  // String/text processing lowering.
  case X86::PCMPISTRM128REG:
  case X86::VPCMPISTRM128REG:
    return EmitPCMP(MI, BB, 3, /*memArg=*/false);
  case X86::PCMPISTRM128MEM:
  case X86::VPCMPISTRM128MEM:
    return EmitPCMP(MI, BB, 3, /*memArg=*/true);
  case X86::PCMPESTRM128REG:
  case X86::VPCMPESTRM128REG:
    return EmitPCMP(MI, BB, 5, /*memArg=*/false);
  case X86::PCMPESTRM128MEM:
  case X86::VPCMPESTRM128MEM:
    return EmitPCMP(MI, BB, 5, /*memArg=*/true);

  // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB);

  // Atomic Lowering.
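  // Each ATOM* pseudo below expands to the load / operate / lock cmpxchg
  // retry loop built by the EmitAtomic*WithCustomInserter helpers above.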
  case X86::ATOMAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               &X86::GR32RegClass);
  case X86::ATOMOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
                                               X86::OR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               &X86::GR32RegClass);
  case X86::ATOMXOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
                                               X86::XOR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               &X86::GR32RegClass);
  case X86::ATOMNAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               &X86::GR32RegClass, true);
  case X86::ATOMMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
  case X86::ATOMMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
  case X86::ATOMUMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
  case X86::ATOMUMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);

  case X86::ATOMAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               &X86::GR16RegClass);
  case X86::ATOMOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
                                               X86::OR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               &X86::GR16RegClass);
  case X86::ATOMXOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
                                               X86::XOR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               &X86::GR16RegClass);
  case X86::ATOMNAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               &X86::GR16RegClass, true);
  case X86::ATOMMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
  case X86::ATOMMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
  case X86::ATOMUMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
  case X86::ATOMUMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);

  case X86::ATOMAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               &X86::GR8RegClass);
  case X86::ATOMOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
                                               X86::OR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               &X86::GR8RegClass);
  case X86::ATOMXOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
                                               X86::XOR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               &X86::GR8RegClass);
  case X86::ATOMNAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               &X86::GR8RegClass, true);
  // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.

  // This group is for 64-bit hosts.
  case X86::ATOMAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               &X86::GR64RegClass);
  case X86::ATOMOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
                                               X86::OR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               &X86::GR64RegClass);
  case X86::ATOMXOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
                                               X86::XOR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               &X86::GR64RegClass);
  case X86::ATOMNAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               &X86::GR64RegClass, true);
  case X86::ATOMMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
  case X86::ATOMMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
  case X86::ATOMUMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
  case X86::ATOMUMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);

  // This group does 64-bit operations on a 32-bit host.
  case X86::ATOMAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               false);
  case X86::ATOMOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::OR32rr, X86::OR32rr,
                                               X86::OR32ri, X86::OR32ri,
                                               false);
  case X86::ATOMXOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::XOR32rr, X86::XOR32rr,
                                               X86::XOR32ri, X86::XOR32ri,
                                               false);
  case X86::ATOMNAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               true);
  case X86::ATOMADD6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::ADD32rr, X86::ADC32rr,
                                               X86::ADD32ri, X86::ADC32ri,
                                               false);
  case X86::ATOMSUB6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::SUB32rr, X86::SBB32rr,
                                               X86::SUB32ri, X86::SBB32ri,
                                               false);
  case X86::ATOMSWAP6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::MOV32rr, X86::MOV32rr,
                                               X86::MOV32ri, X86::MOV32ri,
                                               false);
  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
    case Intrinsic::x86_sse_movmsk_ps:
    case Intrinsic::x86_avx_movmsk_ps_256:
    case Intrinsic::x86_sse2_movmsk_pd:
    case Intrinsic::x86_avx_movmsk_pd_256:
    case Intrinsic::x86_mmx_pmovmskb:
    case Intrinsic::x86_sse2_pmovmskb_128:
    case Intrinsic::x86_avx2_pmovmskb: {
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
      default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
      case Intrinsic::x86_sse_movmsk_ps:     NumLoBits = 4; break;
      case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
      case Intrinsic::x86_sse2_movmsk_pd:    NumLoBits = 2; break;
      case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
      case Intrinsic::x86_mmx_pmovmskb:      NumLoBits = 8; break;
      case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
      case Intrinsic::x86_avx2_pmovmskb:     NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
      break;
    }
    }
    break;
  }
  }
}

unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         unsigned Depth) const {
  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       const GlobalValue* &GA,
                                       int64_t &Offset) const {
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}

/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
/// same as extracting the high 128-bit part of a 256-bit vector and then
/// inserting the result into the low part of a new 256-bit vector.
static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}

/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
/// same as extracting the low 128-bit part of a 256-bit vector and then
/// inserting the result into the high part of a new 256-bit vector.
static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}

/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget* Subtarget) {
  DebugLoc dl = N->getDebugLoc();
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
      V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //                  0,0,0,...
    //                     |
    //    V      UNDEF    BUILD_VECTOR    UNDEF
    //     \      /            \          /
    //  CONCAT_VECTOR         CONCAT_VECTOR
    //         \                  /
    //          \                /
    //   RESULT: V + zero extended
    //
    if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
        V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();

    // To match the shuffle mask, the first half of the mask should
    // be exactly the first vector, and all the rest a splat with the
    // first element of the second one.
    for (unsigned i = 0; i != NumElems/2; ++i)
      if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();
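
    // Past this point the shuffle is known to produce V1's low 128 bits
    // with a zeroed high half, so it can be rebuilt from cheaper nodes.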

    // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
    if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
      if (Ld->hasNUsesOfValue(1, 0)) {
        SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
        SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
                                  Ld->getAlignment(),
                                  false/*isVolatile*/, true/*ReadMem*/,
                                  false/*WriteMem*/);
        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }

    // Emit a zeroed vector and insert the desired subvector on its
    // first half.
    SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  //===--------------------------------------------------------------------===//
  // Combine some shuffles into subvector extracts and inserts:
  //

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  if (isShuffleHigh128VectorInsertLow(SVOp)) {
    SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (isShuffleLow128VectorInsertHigh(SVOp)) {
    SDValue V = Extract128BitVector(V1, 0, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}

/// PerformShuffleCombine - Performs several different shuffle combines.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  // Don't create instructions with illegal types after legalize types has run.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();

  // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
  if (Subtarget->hasAVX() && VT.is256BitVector() &&
      N->getOpcode() == ISD::VECTOR_SHUFFLE)
    return PerformShuffleCombine256(N, DAG, DCI, Subtarget);

  // Only handle 128-bit wide vectors from here on.
  if (!VT.is128BitVector())
    return SDValue();

  // Combine a vector_shuffle that is equal to build_vector load1, load2,
  // load3, load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
  // consecutive, non-overlapping, and in the right order.
  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
    Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));

  return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
}


/// PerformTruncateCombine - Converts a truncate operation into a sequence of
/// vector shuffle operations. This is possible when we truncate a 256-bit
/// vector to a 128-bit vector.
SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasAVX())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  EVT OpVT = Op.getValueType();
  DebugLoc dl = N->getDebugLoc();

  if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) {

    if (Subtarget->hasAVX2()) {
      // AVX2: v4i64 -> v4i32

      // VPERMD
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};

      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op);
      Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32),
                                ShufMask);

      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op,
                         DAG.getIntPtrConstant(0));
    }

    // AVX: v4i64 -> v4i32
    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
                               DAG.getIntPtrConstant(0));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
                               DAG.getIntPtrConstant(2));

    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);

    // PSHUFD
    static const int ShufMask1[] = {0, 2, 0, 0};

    OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT), ShufMask1);
    OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT), ShufMask1);

    // MOVLHPS
    static const int ShufMask2[] = {0, 1, 4, 5};

    return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2);
  }

  if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) {

    if (Subtarget->hasAVX2()) {
      // AVX2: v8i32 -> v8i16

      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op);

      // PSHUFB
      SmallVector<SDValue,32> pshufbMask;
      for (unsigned i = 0; i < 2; ++i) {
        pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
        for (unsigned j = 0; j < 8; ++j)
          pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
      }
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8,
                               &pshufbMask[0], 32);
      Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV);

      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op);

      static const int ShufMask[] = {0, 2, -1, -1};
      Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64),
                                &ShufMask[0]);

      Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
                       DAG.getIntPtrConstant(0));

      return DAG.getNode(ISD::BITCAST, dl, VT, Op);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
                               DAG.getIntPtrConstant(0));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
                               DAG.getIntPtrConstant(4));

    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi);
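
    // The two byte shuffles below pack the low 16 bits of each 32-bit
    // element into the bottom eight bytes of each half; the final shuffle
    // then glues those halves together, MOVLHPS-style.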
PSHUFB 13265 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, 13266 -1, -1, -1, -1, -1, -1, -1, -1}; 13267 13268 OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, DAG.getUNDEF(MVT::v16i8), 13269 ShufMask1); 13270 OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, DAG.getUNDEF(MVT::v16i8), 13271 ShufMask1); 13272 13273 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 13274 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 13275 13276 // MOVLHPS 13277 static const int ShufMask2[] = {0, 1, 4, 5}; 13278 13279 SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2); 13280 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res); 13281 } 13282 13283 return SDValue(); 13284} 13285 13286/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target 13287/// specific shuffle of a load can be folded into a single element load. 13288/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but 13289/// shuffles have been customed lowered so we need to handle those here. 13290static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, 13291 TargetLowering::DAGCombinerInfo &DCI) { 13292 if (DCI.isBeforeLegalizeOps()) 13293 return SDValue(); 13294 13295 SDValue InVec = N->getOperand(0); 13296 SDValue EltNo = N->getOperand(1); 13297 13298 if (!isa<ConstantSDNode>(EltNo)) 13299 return SDValue(); 13300 13301 EVT VT = InVec.getValueType(); 13302 13303 bool HasShuffleIntoBitcast = false; 13304 if (InVec.getOpcode() == ISD::BITCAST) { 13305 // Don't duplicate a load with other uses. 13306 if (!InVec.hasOneUse()) 13307 return SDValue(); 13308 EVT BCVT = InVec.getOperand(0).getValueType(); 13309 if (BCVT.getVectorNumElements() != VT.getVectorNumElements()) 13310 return SDValue(); 13311 InVec = InVec.getOperand(0); 13312 HasShuffleIntoBitcast = true; 13313 } 13314 13315 if (!isTargetShuffle(InVec.getOpcode())) 13316 return SDValue(); 13317 13318 // Don't duplicate a load with other uses. 13319 if (!InVec.hasOneUse()) 13320 return SDValue(); 13321 13322 SmallVector<int, 16> ShuffleMask; 13323 bool UnaryShuffle; 13324 if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, 13325 UnaryShuffle)) 13326 return SDValue(); 13327 13328 // Select the input vector, guarding against out of range extract vector. 13329 unsigned NumElems = VT.getVectorNumElements(); 13330 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 13331 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt]; 13332 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0) 13333 : InVec.getOperand(1); 13334 13335 // If inputs to shuffle are the same for both ops, then allow 2 uses 13336 unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1; 13337 13338 if (LdNode.getOpcode() == ISD::BITCAST) { 13339 // Don't duplicate a load with other uses. 13340 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) 13341 return SDValue(); 13342 13343 AllowedUses = 1; // only allow 1 load use if we have a bitcast 13344 LdNode = LdNode.getOperand(0); 13345 } 13346 13347 if (!ISD::isNormalLoad(LdNode.getNode())) 13348 return SDValue(); 13349 13350 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); 13351 13352 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) 13353 return SDValue(); 13354 13355 if (HasShuffleIntoBitcast) { 13356 // If there's a bitcast before the shuffle, check if the load type and 13357 // alignment is valid. 
13358 unsigned Align = LN0->getAlignment(); 13359 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13360 unsigned NewAlign = TLI.getTargetData()-> 13361 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); 13362 13363 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 13364 return SDValue(); 13365 } 13366 13367 // All checks match so transform back to vector_shuffle so that DAG combiner 13368 // can finish the job 13369 DebugLoc dl = N->getDebugLoc(); 13370 13371 // Create shuffle node taking into account the case that its a unary shuffle 13372 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); 13373 Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl, 13374 InVec.getOperand(0), Shuffle, 13375 &ShuffleMask[0]); 13376 Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 13377 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, 13378 EltNo); 13379} 13380 13381/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 13382/// generation and convert it from being a bunch of shuffles and extracts 13383/// to a simple store and scalar loads to extract the elements. 13384static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 13385 TargetLowering::DAGCombinerInfo &DCI) { 13386 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI); 13387 if (NewOp.getNode()) 13388 return NewOp; 13389 13390 SDValue InputVector = N->getOperand(0); 13391 13392 // Only operate on vectors of 4 elements, where the alternative shuffling 13393 // gets to be more expensive. 13394 if (InputVector.getValueType() != MVT::v4i32) 13395 return SDValue(); 13396 13397 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 13398 // single use which is a sign-extend or zero-extend, and all elements are 13399 // used. 13400 SmallVector<SDNode *, 4> Uses; 13401 unsigned ExtractedElements = 0; 13402 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 13403 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 13404 if (UI.getUse().getResNo() != InputVector.getResNo()) 13405 return SDValue(); 13406 13407 SDNode *Extract = *UI; 13408 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 13409 return SDValue(); 13410 13411 if (Extract->getValueType(0) != MVT::i32) 13412 return SDValue(); 13413 if (!Extract->hasOneUse()) 13414 return SDValue(); 13415 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 13416 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 13417 return SDValue(); 13418 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 13419 return SDValue(); 13420 13421 // Record which element was extracted. 13422 ExtractedElements |= 13423 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 13424 13425 Uses.push_back(Extract); 13426 } 13427 13428 // If not all the elements were used, this may not be worthwhile. 13429 if (ExtractedElements != 15) 13430 return SDValue(); 13431 13432 // Ok, we've now decided to do the transformation. 13433 DebugLoc dl = InputVector.getDebugLoc(); 13434 13435 // Store the value to a temporary stack slot. 13436 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 13437 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 13438 MachinePointerInfo(), false, false, 0); 13439 13440 // Replace each use (extract) with a load of the appropriate element. 
13441 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
13442 UE = Uses.end(); UI != UE; ++UI) {
13443 SDNode *Extract = *UI;
13444
13445 // Compute the element's address.
13446 SDValue Idx = Extract->getOperand(1);
13447 unsigned EltSize =
13448 InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
13449 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
13450 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13451 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
13452
13453 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
13454 StackPtr, OffsetVal);
13455
13456 // Load the scalar.
13457 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
13458 ScalarAddr, MachinePointerInfo(),
13459 false, false, false, 0);
13460
13461 // Replace the extract with the load.
13462 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
13463 }
13464
13465 // The replacement was made in place; don't return anything.
13466 return SDValue();
13467}
13468
13469/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
13470/// nodes.
13471static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
13472 TargetLowering::DAGCombinerInfo &DCI,
13473 const X86Subtarget *Subtarget) {
13474 DebugLoc DL = N->getDebugLoc();
13475 SDValue Cond = N->getOperand(0);
13476 // Get the LHS/RHS of the select.
13477 SDValue LHS = N->getOperand(1);
13478 SDValue RHS = N->getOperand(2);
13479 EVT VT = LHS.getValueType();
13480
13481 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
13482 // instructions match the semantics of the common C idiom x<y?x:y but not
13483 // x<=y?x:y, because of how they handle negative zero (which can be
13484 // ignored in unsafe-math mode).
13485 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
13486 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
13487 (Subtarget->hasSSE2() ||
13488 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
13489 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
13490
13491 unsigned Opcode = 0;
13492 // Check for x CC y ? x : y.
13493 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
13494 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
13495 switch (CC) {
13496 default: break;
13497 case ISD::SETULT:
13498 // Converting this to a min would handle NaNs incorrectly, and swapping
13499 // the operands would cause it to handle comparisons between positive
13500 // and negative zero incorrectly.
13501 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
13502 if (!DAG.getTarget().Options.UnsafeFPMath &&
13503 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
13504 break;
13505 std::swap(LHS, RHS);
13506 }
13507 Opcode = X86ISD::FMIN;
13508 break;
13509 case ISD::SETOLE:
13510 // Converting this to a min would handle comparisons between positive
13511 // and negative zero incorrectly.
13512 if (!DAG.getTarget().Options.UnsafeFPMath &&
13513 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
13514 break;
13515 Opcode = X86ISD::FMIN;
13516 break;
13517 case ISD::SETULE:
13518 // Converting this to a min would handle both negative zeros and NaNs
13519 // incorrectly, but we can swap the operands to fix both.
13520 std::swap(LHS, RHS); 13521 case ISD::SETOLT: 13522 case ISD::SETLT: 13523 case ISD::SETLE: 13524 Opcode = X86ISD::FMIN; 13525 break; 13526 13527 case ISD::SETOGE: 13528 // Converting this to a max would handle comparisons between positive 13529 // and negative zero incorrectly. 13530 if (!DAG.getTarget().Options.UnsafeFPMath && 13531 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 13532 break; 13533 Opcode = X86ISD::FMAX; 13534 break; 13535 case ISD::SETUGT: 13536 // Converting this to a max would handle NaNs incorrectly, and swapping 13537 // the operands would cause it to handle comparisons between positive 13538 // and negative zero incorrectly. 13539 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 13540 if (!DAG.getTarget().Options.UnsafeFPMath && 13541 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 13542 break; 13543 std::swap(LHS, RHS); 13544 } 13545 Opcode = X86ISD::FMAX; 13546 break; 13547 case ISD::SETUGE: 13548 // Converting this to a max would handle both negative zeros and NaNs 13549 // incorrectly, but we can swap the operands to fix both. 13550 std::swap(LHS, RHS); 13551 case ISD::SETOGT: 13552 case ISD::SETGT: 13553 case ISD::SETGE: 13554 Opcode = X86ISD::FMAX; 13555 break; 13556 } 13557 // Check for x CC y ? y : x -- a min/max with reversed arms. 13558 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 13559 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 13560 switch (CC) { 13561 default: break; 13562 case ISD::SETOGE: 13563 // Converting this to a min would handle comparisons between positive 13564 // and negative zero incorrectly, and swapping the operands would 13565 // cause it to handle NaNs incorrectly. 13566 if (!DAG.getTarget().Options.UnsafeFPMath && 13567 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 13568 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13569 break; 13570 std::swap(LHS, RHS); 13571 } 13572 Opcode = X86ISD::FMIN; 13573 break; 13574 case ISD::SETUGT: 13575 // Converting this to a min would handle NaNs incorrectly. 13576 if (!DAG.getTarget().Options.UnsafeFPMath && 13577 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 13578 break; 13579 Opcode = X86ISD::FMIN; 13580 break; 13581 case ISD::SETUGE: 13582 // Converting this to a min would handle both negative zeros and NaNs 13583 // incorrectly, but we can swap the operands to fix both. 13584 std::swap(LHS, RHS); 13585 case ISD::SETOGT: 13586 case ISD::SETGT: 13587 case ISD::SETGE: 13588 Opcode = X86ISD::FMIN; 13589 break; 13590 13591 case ISD::SETULT: 13592 // Converting this to a max would handle NaNs incorrectly. 13593 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13594 break; 13595 Opcode = X86ISD::FMAX; 13596 break; 13597 case ISD::SETOLE: 13598 // Converting this to a max would handle comparisons between positive 13599 // and negative zero incorrectly, and swapping the operands would 13600 // cause it to handle NaNs incorrectly. 13601 if (!DAG.getTarget().Options.UnsafeFPMath && 13602 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 13603 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13604 break; 13605 std::swap(LHS, RHS); 13606 } 13607 Opcode = X86ISD::FMAX; 13608 break; 13609 case ISD::SETULE: 13610 // Converting this to a max would handle both negative zeros and NaNs 13611 // incorrectly, but we can swap the operands to fix both. 
13612 std::swap(LHS, RHS);
13613 case ISD::SETOLT:
13614 case ISD::SETLT:
13615 case ISD::SETLE:
13616 Opcode = X86ISD::FMAX;
13617 break;
13618 }
13619 }
13620
13621 if (Opcode)
13622 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
13623 }
13624
13625 // If this is a select between two integer constants, try to do some
13626 // optimizations.
13627 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
13628 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
13629 // Don't do this for crazy integer types.
13630 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
13631 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
13632 // so that TrueC (the true value) is larger than FalseC.
13633 bool NeedsCondInvert = false;
13634
13635 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
13636 // Efficiently invertible.
13637 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
13638 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
13639 isa<ConstantSDNode>(Cond.getOperand(1))))) {
13640 NeedsCondInvert = true;
13641 std::swap(TrueC, FalseC);
13642 }
13643
13644 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
13645 if (FalseC->getAPIntValue() == 0 &&
13646 TrueC->getAPIntValue().isPowerOf2()) {
13647 if (NeedsCondInvert) // Invert the condition if needed.
13648 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
13649 DAG.getConstant(1, Cond.getValueType()));
13650
13651 // Zero extend the condition if needed.
13652 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
13653
13654 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
13655 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
13656 DAG.getConstant(ShAmt, MVT::i8));
13657 }
13658
13659 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
13660 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
13661 if (NeedsCondInvert) // Invert the condition if needed.
13662 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
13663 DAG.getConstant(1, Cond.getValueType()));
13664
13665 // Zero extend the condition if needed.
13666 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
13667 FalseC->getValueType(0), Cond);
13668 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
13669 SDValue(FalseC, 0));
13670 }
13671
13672 // Optimize cases that will turn into an LEA instruction. This requires
13673 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
13674 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
13675 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
13676 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
13677
13678 bool isFastMultiplier = false;
13679 if (Diff < 10) {
13680 switch ((unsigned char)Diff) {
13681 default: break;
13682 case 1: // result = add base, cond
13683 case 2: // result = lea base( , cond*2)
13684 case 3: // result = lea base(cond, cond*2)
13685 case 4: // result = lea base( , cond*4)
13686 case 5: // result = lea base(cond, cond*4)
13687 case 8: // result = lea base( , cond*8)
13688 case 9: // result = lea base(cond, cond*8)
13689 isFastMultiplier = true;
13690 break;
13691 }
13692 }
13693
13694 if (isFastMultiplier) {
13695 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
13696 if (NeedsCondInvert) // Invert the condition if needed.
13697 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
13698 DAG.getConstant(1, Cond.getValueType()));
13699
13700 // Zero extend the condition if needed.
13701 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
13702 Cond);
13703 // Scale the condition by the difference.
13704 if (Diff != 1)
13705 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
13706 DAG.getConstant(Diff, Cond.getValueType()));
13707
13708 // Add the base if non-zero.
13709 if (FalseC->getAPIntValue() != 0)
13710 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
13711 SDValue(FalseC, 0));
13712 return Cond;
13713 }
13714 }
13715 }
13716 }
13717
13718 // Canonicalize max and min:
13719 // (x > y) ? x : y -> (x >= y) ? x : y
13720 // (x < y) ? x : y -> (x <= y) ? x : y
13721 // This allows use of COND_S / COND_NS (see TranslateX86CC), which eliminates
13722 // the need for an extra compare
13723 // against zero. e.g.
13724 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
13725 // subl %esi, %edi
13726 // testl %edi, %edi
13727 // movl $0, %eax
13728 // cmovgl %edi, %eax
13729 // =>
13730 // xorl %eax, %eax
13731 // subl %esi, %edi
13732 // cmovsl %eax, %edi
13733 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
13734 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
13735 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
13736 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
13737 switch (CC) {
13738 default: break;
13739 case ISD::SETLT:
13740 case ISD::SETGT: {
13741 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
13742 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(),
13743 Cond.getOperand(0), Cond.getOperand(1), NewCC);
13744 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
13745 }
13746 }
13747 }
13748
13749 // If we know that this node is legal then we know that it is going to be
13750 // matched by one of the SSE/AVX BLEND instructions. These instructions only
13751 // depend on the highest bit in each element. Try to use SimplifyDemandedBits
13752 // to simplify previous instructions.
13753 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13754 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
13755 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) {
13756 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
13757
13758 // Don't optimize vector selects that map to mask-registers.
13759 if (BitWidth == 1)
13760 return SDValue();
13761
13762 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
13763 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
13764
13765 APInt KnownZero, KnownOne;
13766 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
13767 DCI.isBeforeLegalizeOps());
13768 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
13769 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO))
13770 DCI.CommitTargetLoweringOpt(TLO);
13771 }
13772
13773 return SDValue();
13774}
13775
13776// Check whether a boolean test is testing a boolean value generated by
13777// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
13778// condition code.
13779//
13780// Simplify the following patterns:
13781// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
13782// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
13783// to (Op EFLAGS Cond)
13784//
13785// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
13786// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
13787// to (Op EFLAGS !Cond)
13788//
13789// where Op could be BRCOND or CMOV.
13790//
13791static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
13792 // Quit unless CMP, or a SUB whose value result is unused.
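// (An X86ISD::SUB whose integer result is unused defines only EFLAGS, which
// makes it equivalent to a CMP of the same operands.)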
13793 if (Cmp.getOpcode() != X86ISD::CMP &&
13794 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
13795 return SDValue();
13796
13797 // Quit if not used as a boolean value.
13798 if (CC != X86::COND_E && CC != X86::COND_NE)
13799 return SDValue();
13800
13801 // Check CMP operands. One of them should be 0 or 1 and the other should be
13802 // a SetCC or a value extended from it.
13803 SDValue Op1 = Cmp.getOperand(0);
13804 SDValue Op2 = Cmp.getOperand(1);
13805
13806 SDValue SetCC;
13807 const ConstantSDNode* C = 0;
13808 bool needOppositeCond = (CC == X86::COND_E);
13809
13810 if ((C = dyn_cast<ConstantSDNode>(Op1)))
13811 SetCC = Op2;
13812 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
13813 SetCC = Op1;
13814 else // Quit if neither operand is a constant.
13815 return SDValue();
13816
13817 if (C->getZExtValue() == 1)
13818 needOppositeCond = !needOppositeCond;
13819 else if (C->getZExtValue() != 0)
13820 // Quit if the constant is neither 0 nor 1.
13821 return SDValue();
13822
13823 // Skip 'zext' node.
13824 if (SetCC.getOpcode() == ISD::ZERO_EXTEND)
13825 SetCC = SetCC.getOperand(0);
13826
13827 // Quit if not SETCC.
13828 // FIXME: So far we only handle the boolean value generated from SETCC. If
13829 // there are other ways to generate boolean values, we need to handle them
13830 // here as well.
13831 if (SetCC.getOpcode() != X86ISD::SETCC)
13832 return SDValue();
13833
13834 // Set the condition code, or its opposite if necessary.
13835 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
13836 if (needOppositeCond)
13837 CC = X86::GetOppositeBranchCondition(CC);
13838
13839 return SetCC.getOperand(1);
13840}
13841
13842static bool IsValidFCMOVCondition(X86::CondCode CC) {
13843 switch (CC) {
13844 default:
13845 return false;
13846 case X86::COND_B:
13847 case X86::COND_BE:
13848 case X86::COND_E:
13849 case X86::COND_P:
13850 case X86::COND_AE:
13851 case X86::COND_A:
13852 case X86::COND_NE:
13853 case X86::COND_NP:
13854 return true;
13855 }
13856}
13857
13858/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
13859static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
13860 TargetLowering::DAGCombinerInfo &DCI) {
13861 DebugLoc DL = N->getDebugLoc();
13862
13863 // If the flag operand isn't dead, don't touch this CMOV.
13864 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
13865 return SDValue();
13866
13867 SDValue FalseOp = N->getOperand(0);
13868 SDValue TrueOp = N->getOperand(1);
13869 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
13870 SDValue Cond = N->getOperand(3);
13871
13872 if (CC == X86::COND_E || CC == X86::COND_NE) {
13873 switch (Cond.getOpcode()) {
13874 default: break;
13875 case X86ISD::BSR:
13876 case X86ISD::BSF:
13877 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
13878 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
13879 return (CC == X86::COND_E) ? FalseOp : TrueOp;
13880 }
13881 }
13882
13883 SDValue Flags;
13884
13885 Flags = BoolTestSetCCCombine(Cond, CC);
13886 if (Flags.getNode() &&
13887 // Extra check as FCMOV only supports a subset of X86 cond.
13888 (FalseOp.getValueType() != MVT::f80 || IsValidFCMOVCondition(CC))) {
13889 SDValue Ops[] = { FalseOp, TrueOp,
13890 DAG.getConstant(CC, MVT::i8), Flags };
13891 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(),
13892 Ops, array_lengthof(Ops));
13893 }
13894
13895 // If this is a select between two integer constants, try to do some
13896 // optimizations. Note that the operands are ordered the opposite of SELECT
13897 // operands.
13898 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
13899 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
13900 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
13901 // larger than FalseC (the false value).
13902 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
13903 CC = X86::GetOppositeBranchCondition(CC);
13904 std::swap(TrueC, FalseC);
13905 }
13906
13907 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
13908 // This is efficient for any integer data type (including i8/i16) and
13909 // shift amount.
13910 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
13911 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
13912 DAG.getConstant(CC, MVT::i8), Cond);
13913
13914 // Zero extend the condition if needed.
13915 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
13916
13917 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
13918 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
13919 DAG.getConstant(ShAmt, MVT::i8));
13920 if (N->getNumValues() == 2) // Dead flag value?
13921 return DCI.CombineTo(N, Cond, SDValue());
13922 return Cond;
13923 }
13924
13925 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
13926 // for any integer data type, including i8/i16.
13927 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
13928 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
13929 DAG.getConstant(CC, MVT::i8), Cond);
13930
13931 // Zero extend the condition if needed.
13932 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
13933 FalseC->getValueType(0), Cond);
13934 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
13935 SDValue(FalseC, 0));
13936
13937 if (N->getNumValues() == 2) // Dead flag value?
13938 return DCI.CombineTo(N, Cond, SDValue());
13939 return Cond;
13940 }
13941
13942 // Optimize cases that will turn into an LEA instruction. This requires
13943 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
13944 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
13945 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
13946 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
13947
13948 bool isFastMultiplier = false;
13949 if (Diff < 10) {
13950 switch ((unsigned char)Diff) {
13951 default: break;
13952 case 1: // result = add base, cond
13953 case 2: // result = lea base( , cond*2)
13954 case 3: // result = lea base(cond, cond*2)
13955 case 4: // result = lea base( , cond*4)
13956 case 5: // result = lea base(cond, cond*4)
13957 case 8: // result = lea base( , cond*8)
13958 case 9: // result = lea base(cond, cond*8)
13959 isFastMultiplier = true;
13960 break;
13961 }
13962 }
13963
13964 if (isFastMultiplier) {
13965 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
13966 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
13967 DAG.getConstant(CC, MVT::i8), Cond);
13968 // Zero extend the condition if needed.
13969 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
13970 Cond);
13971 // Scale the condition by the difference.
13972 if (Diff != 1)
13973 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
13974 DAG.getConstant(Diff, Cond.getValueType()));
13975
13976 // Add the base if non-zero.
13977 if (FalseC->getAPIntValue() != 0)
13978 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
13979 SDValue(FalseC, 0));
13980 if (N->getNumValues() == 2) // Dead flag value?
13981 return DCI.CombineTo(N, Cond, SDValue());
13982 return Cond;
13983 }
13984 }
13985 }
13986 }
13987 return SDValue();
13988}
13989
13990
13991/// PerformMulCombine - Optimize a single multiply by a constant into two
13992/// multiplies in order to implement it with two cheaper instructions, e.g.
13993/// LEA + SHL, LEA + LEA.
13994static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
13995 TargetLowering::DAGCombinerInfo &DCI) {
13996 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
13997 return SDValue();
13998
13999 EVT VT = N->getValueType(0);
14000 if (VT != MVT::i64)
14001 return SDValue();
14002
14003 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
14004 if (!C)
14005 return SDValue();
14006 uint64_t MulAmt = C->getZExtValue();
14007 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
14008 return SDValue();
14009
14010 uint64_t MulAmt1 = 0;
14011 uint64_t MulAmt2 = 0;
14012 if ((MulAmt % 9) == 0) {
14013 MulAmt1 = 9;
14014 MulAmt2 = MulAmt / 9;
14015 } else if ((MulAmt % 5) == 0) {
14016 MulAmt1 = 5;
14017 MulAmt2 = MulAmt / 5;
14018 } else if ((MulAmt % 3) == 0) {
14019 MulAmt1 = 3;
14020 MulAmt2 = MulAmt / 3;
14021 }
14022 if (MulAmt2 &&
14023 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
14024 DebugLoc DL = N->getDebugLoc();
14025
14026 if (isPowerOf2_64(MulAmt2) &&
14027 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
14028 // If the second multiplier is pow2, issue it first. We want the multiply
14029 // by 3, 5, or 9 to be folded into the addressing mode unless the lone use
14030 // is an add.
14031 std::swap(MulAmt1, MulAmt2);
14032
14033 SDValue NewMul;
14034 if (isPowerOf2_64(MulAmt1))
14035 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
14036 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
14037 else
14038 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
14039 DAG.getConstant(MulAmt1, VT));
14040
14041 if (isPowerOf2_64(MulAmt2))
14042 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
14043 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
14044 else
14045 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
14046 DAG.getConstant(MulAmt2, VT));
14047
14048 // Do not add new nodes to DAG combiner worklist.
14049 DCI.CombineTo(N, NewMul, false);
14050 }
14051 return SDValue();
14052}
14053
14054static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
14055 SDValue N0 = N->getOperand(0);
14056 SDValue N1 = N->getOperand(1);
14057 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
14058 EVT VT = N0.getValueType();
14059
14060 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
14061 // since the result of setcc_c is all zeros or all ones.
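// (SETCC_CARRY is materialized as an SBB of a register with itself, so every
// bit of its result is 0 or every bit is 1; shifting the AND mask instead of
// the value is therefore behavior-preserving.)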
14062 if (VT.isInteger() && !VT.isVector() &&
14063 N1C && N0.getOpcode() == ISD::AND &&
14064 N0.getOperand(1).getOpcode() == ISD::Constant) {
14065 SDValue N00 = N0.getOperand(0);
14066 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
14067 ((N00.getOpcode() == ISD::ANY_EXTEND ||
14068 N00.getOpcode() == ISD::ZERO_EXTEND) &&
14069 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
14070 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
14071 APInt ShAmt = N1C->getAPIntValue();
14072 Mask = Mask.shl(ShAmt);
14073 if (Mask != 0)
14074 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
14075 N00, DAG.getConstant(Mask, VT));
14076 }
14077 }
14078
14079
14080 // Hardware support for vector shifts is sparse, which makes us scalarize the
14081 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
14082 // SHL.
14083 // (shl V, 1) -> add V,V
14084 if (isSplatVector(N1.getNode())) {
14085 assert(N0.getValueType().isVector() && "Invalid vector shift type");
14086 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0));
14087 // We shift all of the values by one. In many cases we do not have
14088 // hardware support for this operation. This is better expressed as an ADD
14089 // of two values.
14090 if (N1C && (1 == N1C->getZExtValue())) {
14091 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0);
14092 }
14093 }
14094
14095 return SDValue();
14096}
14097
14098/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
14099/// when possible.
14100static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
14101 TargetLowering::DAGCombinerInfo &DCI,
14102 const X86Subtarget *Subtarget) {
14103 EVT VT = N->getValueType(0);
14104 if (N->getOpcode() == ISD::SHL) {
14105 SDValue V = PerformSHLCombine(N, DAG);
14106 if (V.getNode()) return V;
14107 }
14108
14109 // On X86 with SSE2 support, we can transform this to a vector shift if
14110 // all elements are shifted by the same amount. We can't do this in legalize
14111 // because a constant vector is typically transformed into a constant pool
14112 // load, so we have no knowledge of the shift amount.
14113 if (!Subtarget->hasSSE2())
14114 return SDValue();
14115
14116 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
14117 (!Subtarget->hasAVX2() ||
14118 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
14119 return SDValue();
14120
14121 SDValue ShAmtOp = N->getOperand(1);
14122 EVT EltVT = VT.getVectorElementType();
14123 DebugLoc DL = N->getDebugLoc();
14124 SDValue BaseShAmt = SDValue();
14125 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
14126 unsigned NumElts = VT.getVectorNumElements();
14127 unsigned i = 0;
14128 for (; i != NumElts; ++i) {
14129 SDValue Arg = ShAmtOp.getOperand(i);
14130 if (Arg.getOpcode() == ISD::UNDEF) continue;
14131 BaseShAmt = Arg;
14132 break;
14133 }
14134 // Handle the case where the build_vector is all undef
14135 // FIXME: Should DAG allow this?
14136 if (i == NumElts) 14137 return SDValue(); 14138 14139 for (; i != NumElts; ++i) { 14140 SDValue Arg = ShAmtOp.getOperand(i); 14141 if (Arg.getOpcode() == ISD::UNDEF) continue; 14142 if (Arg != BaseShAmt) { 14143 return SDValue(); 14144 } 14145 } 14146 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 14147 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 14148 SDValue InVec = ShAmtOp.getOperand(0); 14149 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 14150 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 14151 unsigned i = 0; 14152 for (; i != NumElts; ++i) { 14153 SDValue Arg = InVec.getOperand(i); 14154 if (Arg.getOpcode() == ISD::UNDEF) continue; 14155 BaseShAmt = Arg; 14156 break; 14157 } 14158 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 14159 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 14160 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 14161 if (C->getZExtValue() == SplatIdx) 14162 BaseShAmt = InVec.getOperand(1); 14163 } 14164 } 14165 if (BaseShAmt.getNode() == 0) { 14166 // Don't create instructions with illegal types after legalize 14167 // types has run. 14168 if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) && 14169 !DCI.isBeforeLegalize()) 14170 return SDValue(); 14171 14172 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 14173 DAG.getIntPtrConstant(0)); 14174 } 14175 } else 14176 return SDValue(); 14177 14178 // The shift amount is an i32. 14179 if (EltVT.bitsGT(MVT::i32)) 14180 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 14181 else if (EltVT.bitsLT(MVT::i32)) 14182 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 14183 14184 // The shift amount is identical so we can do a vector shift. 14185 SDValue ValOp = N->getOperand(0); 14186 switch (N->getOpcode()) { 14187 default: 14188 llvm_unreachable("Unknown shift opcode!"); 14189 case ISD::SHL: 14190 switch (VT.getSimpleVT().SimpleTy) { 14191 default: return SDValue(); 14192 case MVT::v2i64: 14193 case MVT::v4i32: 14194 case MVT::v8i16: 14195 case MVT::v4i64: 14196 case MVT::v8i32: 14197 case MVT::v16i16: 14198 return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); 14199 } 14200 case ISD::SRA: 14201 switch (VT.getSimpleVT().SimpleTy) { 14202 default: return SDValue(); 14203 case MVT::v4i32: 14204 case MVT::v8i16: 14205 case MVT::v8i32: 14206 case MVT::v16i16: 14207 return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); 14208 } 14209 case ISD::SRL: 14210 switch (VT.getSimpleVT().SimpleTy) { 14211 default: return SDValue(); 14212 case MVT::v2i64: 14213 case MVT::v4i32: 14214 case MVT::v8i16: 14215 case MVT::v4i64: 14216 case MVT::v8i32: 14217 case MVT::v16i16: 14218 return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); 14219 } 14220 } 14221} 14222 14223 14224// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 14225// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 14226// and friends. Likewise for OR -> CMPNEQSS. 14227static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 14228 TargetLowering::DAGCombinerInfo &DCI, 14229 const X86Subtarget *Subtarget) { 14230 unsigned opcode; 14231 14232 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 14233 // we're requiring SSE2 for both. 
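// For example, (and (setcc COND_E, (cmp x, y)) (setcc COND_NP, (cmp x, y)))
// is an ordered-equal test that CMPEQSS computes directly, keeping the
// result out of EFLAGS.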
14234 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
14235 SDValue N0 = N->getOperand(0);
14236 SDValue N1 = N->getOperand(1);
14237 SDValue CMP0 = N0->getOperand(1);
14238 SDValue CMP1 = N1->getOperand(1);
14239 DebugLoc DL = N->getDebugLoc();
14240
14241 // The SETCCs should both refer to the same CMP.
14242 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
14243 return SDValue();
14244
14245 SDValue CMP00 = CMP0->getOperand(0);
14246 SDValue CMP01 = CMP0->getOperand(1);
14247 EVT VT = CMP00.getValueType();
14248
14249 if (VT == MVT::f32 || VT == MVT::f64) {
14250 bool ExpectingFlags = false;
14251 // Check for any users that want flags:
14252 for (SDNode::use_iterator UI = N->use_begin(),
14253 UE = N->use_end();
14254 !ExpectingFlags && UI != UE; ++UI)
14255 switch (UI->getOpcode()) {
14256 default:
14257 case ISD::BR_CC:
14258 case ISD::BRCOND:
14259 case ISD::SELECT:
14260 ExpectingFlags = true;
14261 break;
14262 case ISD::CopyToReg:
14263 case ISD::SIGN_EXTEND:
14264 case ISD::ZERO_EXTEND:
14265 case ISD::ANY_EXTEND:
14266 break;
14267 }
14268
14269 if (!ExpectingFlags) {
14270 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
14271 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
14272
14273 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
14274 X86::CondCode tmp = cc0;
14275 cc0 = cc1;
14276 cc1 = tmp;
14277 }
14278
14279 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
14280 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
14281 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
14282 X86ISD::NodeType NTOperator = is64BitFP ?
14283 X86ISD::FSETCCsd : X86ISD::FSETCCss;
14284 // FIXME: need symbolic constants for these magic numbers.
14285 // See X86ATTInstPrinter.cpp:printSSECC().
14286 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
14287 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01,
14288 DAG.getConstant(x86cc, MVT::i8));
14289 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32,
14290 OnesOrZeroesF);
14291 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI,
14292 DAG.getConstant(1, MVT::i32));
14293 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
14294 return OneBitOfTruth;
14295 }
14296 }
14297 }
14298 }
14299 return SDValue();
14300}
14301
14302/// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
14303/// so it can be folded inside ANDNP.
14304static bool CanFoldXORWithAllOnes(const SDNode *N) {
14305 EVT VT = N->getValueType(0);
14306
14307 // Match direct AllOnes for 128 and 256-bit vectors
14308 if (ISD::isBuildVectorAllOnes(N))
14309 return true;
14310
14311 // Look through a bit convert.
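// (The all-ones constant frequently reaches the XOR only through a bitcast
// from a vector with a different element type.)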
14312 if (N->getOpcode() == ISD::BITCAST)
14313 N = N->getOperand(0).getNode();
14314
14315 // Sometimes the operand may come from an insert_subvector building a 256-bit
14316 // all-ones vector.
14317 if (VT.is256BitVector() &&
14318 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
14319 SDValue V1 = N->getOperand(0);
14320 SDValue V2 = N->getOperand(1);
14321
14322 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
14323 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
14324 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
14325 ISD::isBuildVectorAllOnes(V2.getNode()))
14326 return true;
14327 }
14328
14329 return false;
14330}
14331
14332static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
14333 TargetLowering::DAGCombinerInfo &DCI,
14334 const X86Subtarget *Subtarget) {
14335 if (DCI.isBeforeLegalizeOps())
14336 return SDValue();
14337
14338 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
14339 if (R.getNode())
14340 return R;
14341
14342 EVT VT = N->getValueType(0);
14343
14344 // Create ANDN, BLSI, and BLSR instructions
14345 // BLSI is X & (-X)
14346 // BLSR is X & (X-1)
14347 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) {
14348 SDValue N0 = N->getOperand(0);
14349 SDValue N1 = N->getOperand(1);
14350 DebugLoc DL = N->getDebugLoc();
14351
14352 // Check LHS for not
14353 if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1)))
14354 return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1);
14355 // Check RHS for not
14356 if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1)))
14357 return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0);
14358
14359 // Check LHS for neg
14360 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
14361 isZero(N0.getOperand(0)))
14362 return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
14363
14364 // Check RHS for neg
14365 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
14366 isZero(N1.getOperand(0)))
14367 return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
14368
14369 // Check LHS for X-1
14370 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
14371 isAllOnes(N0.getOperand(1)))
14372 return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
14373
14374 // Check RHS for X-1
14375 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
14376 isAllOnes(N1.getOperand(1)))
14377 return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
14378
14379 return SDValue();
14380 }
14381
14382 // Want to form ANDNP nodes:
14383 // 1) In the hopes of then easily combining them with OR and AND nodes
14384 // to form PBLEND/PSIGN.
14385 // 2) To match ANDN packed intrinsics
14386 if (VT != MVT::v2i64 && VT != MVT::v4i64)
14387 return SDValue();
14388
14389 SDValue N0 = N->getOperand(0);
14390 SDValue N1 = N->getOperand(1);
14391 DebugLoc DL = N->getDebugLoc();
14392
14393 // Check LHS for vnot
14394 if (N0.getOpcode() == ISD::XOR &&
14395 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
14396 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
14397 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
14398
14399 // Check RHS for vnot
14400 if (N1.getOpcode() == ISD::XOR &&
14401 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
14402 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
14403 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
14404
14405 return SDValue();
14406}
14407
14408static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
14409 TargetLowering::DAGCombinerInfo &DCI,
14410 const X86Subtarget *Subtarget) {
14411 if (DCI.isBeforeLegalizeOps())
14412 return SDValue();
14413
14414 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
14415 if (R.getNode())
14416 return R;
14417
14418 EVT VT = N->getValueType(0);
14419
14420 SDValue N0 = N->getOperand(0);
14421 SDValue N1 = N->getOperand(1);
14422
14423 // look for psign/blend
14424 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
14425 if (!Subtarget->hasSSSE3() ||
14426 (VT == MVT::v4i64 && !Subtarget->hasAVX2()))
14427 return SDValue();
14428
14429 // Canonicalize pandn to RHS
14430 if (N0.getOpcode() == X86ISD::ANDNP)
14431 std::swap(N0, N1);
14432 // or (and (m, y), (pandn m, x))
14433 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
14434 SDValue Mask = N1.getOperand(0);
14435 SDValue X = N1.getOperand(1);
14436 SDValue Y;
14437 if (N0.getOperand(0) == Mask)
14438 Y = N0.getOperand(1);
14439 if (N0.getOperand(1) == Mask)
14440 Y = N0.getOperand(0);
14441
14442 // Check to see if the mask appeared in both the AND and ANDNP.
14443 if (!Y.getNode())
14444 return SDValue();
14445
14446 // Validate that X, Y, and Mask are bitcasts, and see through them.
14447 // Look through mask bitcast.
14448 if (Mask.getOpcode() == ISD::BITCAST)
14449 Mask = Mask.getOperand(0);
14450 if (X.getOpcode() == ISD::BITCAST)
14451 X = X.getOperand(0);
14452 if (Y.getOpcode() == ISD::BITCAST)
14453 Y = Y.getOperand(0);
14454
14455 EVT MaskVT = Mask.getValueType();
14456
14457 // Validate that the Mask operand is a vector sra node.
14458 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
14459 // there is no psrai.b
14460 if (Mask.getOpcode() != X86ISD::VSRAI)
14461 return SDValue();
14462
14463 // Check that the SRA is all signbits.
14464 SDValue SraC = Mask.getOperand(1);
14465 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
14466 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
14467 if ((SraAmt + 1) != EltBits)
14468 return SDValue();
14469
14470 DebugLoc DL = N->getDebugLoc();
14471
14472 // Now we know we at least have a pblendvb with the mask val. See if
14473 // we can form a psignb/w/d.
14474 // psign = x.type == y.type == mask.type && y = sub(0, x); 14475 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 14476 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 14477 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 14478 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 14479 "Unsupported VT for PSIGN"); 14480 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 14481 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 14482 } 14483 // PBLENDVB only available on SSE 4.1 14484 if (!Subtarget->hasSSE41()) 14485 return SDValue(); 14486 14487 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 14488 14489 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 14490 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 14491 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 14492 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 14493 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 14494 } 14495 } 14496 14497 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 14498 return SDValue(); 14499 14500 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 14501 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 14502 std::swap(N0, N1); 14503 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 14504 return SDValue(); 14505 if (!N0.hasOneUse() || !N1.hasOneUse()) 14506 return SDValue(); 14507 14508 SDValue ShAmt0 = N0.getOperand(1); 14509 if (ShAmt0.getValueType() != MVT::i8) 14510 return SDValue(); 14511 SDValue ShAmt1 = N1.getOperand(1); 14512 if (ShAmt1.getValueType() != MVT::i8) 14513 return SDValue(); 14514 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 14515 ShAmt0 = ShAmt0.getOperand(0); 14516 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 14517 ShAmt1 = ShAmt1.getOperand(0); 14518 14519 DebugLoc DL = N->getDebugLoc(); 14520 unsigned Opc = X86ISD::SHLD; 14521 SDValue Op0 = N0.getOperand(0); 14522 SDValue Op1 = N1.getOperand(0); 14523 if (ShAmt0.getOpcode() == ISD::SUB) { 14524 Opc = X86ISD::SHRD; 14525 std::swap(Op0, Op1); 14526 std::swap(ShAmt0, ShAmt1); 14527 } 14528 14529 unsigned Bits = VT.getSizeInBits(); 14530 if (ShAmt1.getOpcode() == ISD::SUB) { 14531 SDValue Sum = ShAmt1.getOperand(0); 14532 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 14533 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 14534 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 14535 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 14536 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 14537 return DAG.getNode(Opc, DL, VT, 14538 Op0, Op1, 14539 DAG.getNode(ISD::TRUNCATE, DL, 14540 MVT::i8, ShAmt0)); 14541 } 14542 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 14543 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 14544 if (ShAmt0C && 14545 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 14546 return DAG.getNode(Opc, DL, VT, 14547 N0.getOperand(0), N1.getOperand(0), 14548 DAG.getNode(ISD::TRUNCATE, DL, 14549 MVT::i8, ShAmt0)); 14550 } 14551 14552 return SDValue(); 14553} 14554 14555// Generate NEG and CMOV for integer abs. 14556static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 14557 EVT VT = N->getValueType(0); 14558 14559 // Since X86 does not have CMOV for 8-bit integer, we don't convert 14560 // 8-bit integer abs to NEG and CMOV. 
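// The idiom matched below computes abs(X) branchlessly as
// Y = (sra X, bitwidth-1); abs(X) = (xor (add X, Y), Y)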
14561 if (VT.isInteger() && VT.getSizeInBits() == 8) 14562 return SDValue(); 14563 14564 SDValue N0 = N->getOperand(0); 14565 SDValue N1 = N->getOperand(1); 14566 DebugLoc DL = N->getDebugLoc(); 14567 14568 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 14569 // and change it to SUB and CMOV. 14570 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 14571 N0.getOpcode() == ISD::ADD && 14572 N0.getOperand(1) == N1 && 14573 N1.getOpcode() == ISD::SRA && 14574 N1.getOperand(0) == N0.getOperand(0)) 14575 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 14576 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 14577 // Generate SUB & CMOV. 14578 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 14579 DAG.getConstant(0, VT), N0.getOperand(0)); 14580 14581 SDValue Ops[] = { N0.getOperand(0), Neg, 14582 DAG.getConstant(X86::COND_GE, MVT::i8), 14583 SDValue(Neg.getNode(), 1) }; 14584 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 14585 Ops, array_lengthof(Ops)); 14586 } 14587 return SDValue(); 14588} 14589 14590// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 14591static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 14592 TargetLowering::DAGCombinerInfo &DCI, 14593 const X86Subtarget *Subtarget) { 14594 if (DCI.isBeforeLegalizeOps()) 14595 return SDValue(); 14596 14597 if (Subtarget->hasCMov()) { 14598 SDValue RV = performIntegerAbsCombine(N, DAG); 14599 if (RV.getNode()) 14600 return RV; 14601 } 14602 14603 // Try forming BMI if it is available. 14604 if (!Subtarget->hasBMI()) 14605 return SDValue(); 14606 14607 EVT VT = N->getValueType(0); 14608 14609 if (VT != MVT::i32 && VT != MVT::i64) 14610 return SDValue(); 14611 14612 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 14613 14614 // Create BLSMSK instructions by finding X ^ (X-1) 14615 SDValue N0 = N->getOperand(0); 14616 SDValue N1 = N->getOperand(1); 14617 DebugLoc DL = N->getDebugLoc(); 14618 14619 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 14620 isAllOnes(N0.getOperand(1))) 14621 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 14622 14623 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 14624 isAllOnes(N1.getOperand(1))) 14625 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 14626 14627 return SDValue(); 14628} 14629 14630/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 14631static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 14632 TargetLowering::DAGCombinerInfo &DCI, 14633 const X86Subtarget *Subtarget) { 14634 LoadSDNode *Ld = cast<LoadSDNode>(N); 14635 EVT RegVT = Ld->getValueType(0); 14636 EVT MemVT = Ld->getMemoryVT(); 14637 DebugLoc dl = Ld->getDebugLoc(); 14638 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14639 14640 ISD::LoadExtType Ext = Ld->getExtensionType(); 14641 14642 // If this is a vector EXT Load then attempt to optimize it using a 14643 // shuffle. We need SSE4 for the shuffles. 14644 // TODO: It is possible to support ZExt by zeroing the undef values 14645 // during the shuffle phase or after the shuffle. 
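// For example, an EXTLOAD from v4i8 to v4i32 becomes one scalar i32 load,
// a SCALAR_TO_VECTOR, and a v16i8 shuffle that places the four loaded bytes
// at byte positions 0, 4, 8 and 12.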
14646 if (RegVT.isVector() && RegVT.isInteger() &&
14647 Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) {
14648 assert(MemVT != RegVT && "Cannot extend to the same type");
14649 assert(MemVT.isVector() && "Must load a vector from memory");
14650
14651 unsigned NumElems = RegVT.getVectorNumElements();
14652 unsigned RegSz = RegVT.getSizeInBits();
14653 unsigned MemSz = MemVT.getSizeInBits();
14654 assert(RegSz > MemSz && "Register size must be greater than the mem size");
14655
14656 // All sizes must be a power of two.
14657 if (!isPowerOf2_32(RegSz * MemSz * NumElems))
14658 return SDValue();
14659
14660 // Attempt to load the original value using scalar loads.
14661 // Find the largest scalar type that divides the total loaded size.
14662 MVT SclrLoadTy = MVT::i8;
14663 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
14664 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
14665 MVT Tp = (MVT::SimpleValueType)tp;
14666 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
14667 SclrLoadTy = Tp;
14668 }
14669 }
14670
14671 // On 32-bit systems, we can't use 64-bit integer loads. Try bitcasting to f64.
14672 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
14673 (64 <= MemSz))
14674 SclrLoadTy = MVT::f64;
14675
14676 // Calculate the number of scalar loads that we need to perform
14677 // in order to load our vector from memory.
14678 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
14679
14680 // Represent our vector as a sequence of elements which are the
14681 // largest scalar that we can load.
14682 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy,
14683 RegSz/SclrLoadTy.getSizeInBits());
14684
14685 // Represent the data using the same element type that is stored in
14686 // memory. In practice, we "widen" MemVT.
14687 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
14688 RegSz/MemVT.getScalarType().getSizeInBits());
14689
14690 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
14691 "Invalid vector type");
14692
14693 // We can't shuffle using an illegal type.
14694 if (!TLI.isTypeLegal(WideVecVT))
14695 return SDValue();
14696
14697 SmallVector<SDValue, 8> Chains;
14698 SDValue Ptr = Ld->getBasePtr();
14699 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8,
14700 TLI.getPointerTy());
14701 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
14702
14703 for (unsigned i = 0; i < NumLoads; ++i) {
14704 // Perform a single load.
14705 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(),
14706 Ptr, Ld->getPointerInfo(),
14707 Ld->isVolatile(), Ld->isNonTemporal(),
14708 Ld->isInvariant(), Ld->getAlignment());
14709 Chains.push_back(ScalarLoad.getValue(1));
14710 // Insert the first element using SCALAR_TO_VECTOR in order to avoid
14711 // another round of DAG combining.
14712 if (i == 0)
14713 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
14714 else
14715 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
14716 ScalarLoad, DAG.getIntPtrConstant(i));
14717
14718 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14719 }
14720
14721 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0],
14722 Chains.size());
14723
14724 // Bitcast the loaded value to a vector of the original element type, in
14725 // the size of the target vector type.
14726 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 14727 unsigned SizeRatio = RegSz/MemSz; 14728 14729 // Redistribute the loaded elements into the different locations. 14730 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 14731 for (unsigned i = 0; i != NumElems; ++i) 14732 ShuffleVec[i*SizeRatio] = i; 14733 14734 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 14735 DAG.getUNDEF(WideVecVT), 14736 &ShuffleVec[0]); 14737 14738 // Bitcast to the requested type. 14739 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 14740 // Replace the original load with the new sequence 14741 // and return the new chain. 14742 return DCI.CombineTo(N, Shuff, TF, true); 14743 } 14744 14745 return SDValue(); 14746} 14747 14748/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 14749static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 14750 const X86Subtarget *Subtarget) { 14751 StoreSDNode *St = cast<StoreSDNode>(N); 14752 EVT VT = St->getValue().getValueType(); 14753 EVT StVT = St->getMemoryVT(); 14754 DebugLoc dl = St->getDebugLoc(); 14755 SDValue StoredVal = St->getOperand(1); 14756 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14757 14758 // If we are saving a concatenation of two XMM registers, perform two stores. 14759 // On Sandy Bridge, 256-bit memory operations are executed by two 14760 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 14761 // memory operation. 14762 if (VT.is256BitVector() && !Subtarget->hasAVX2() && 14763 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 14764 StoredVal.getNumOperands() == 2) { 14765 SDValue Value0 = StoredVal.getOperand(0); 14766 SDValue Value1 = StoredVal.getOperand(1); 14767 14768 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 14769 SDValue Ptr0 = St->getBasePtr(); 14770 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 14771 14772 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 14773 St->getPointerInfo(), St->isVolatile(), 14774 St->isNonTemporal(), St->getAlignment()); 14775 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 14776 St->getPointerInfo(), St->isVolatile(), 14777 St->isNonTemporal(), St->getAlignment()); 14778 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 14779 } 14780 14781 // Optimize trunc store (of multiple scalars) to shuffle and store. 14782 // First, pack all of the elements in one place. Next, store to memory 14783 // in fewer chunks. 14784 if (St->isTruncatingStore() && VT.isVector()) { 14785 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14786 unsigned NumElems = VT.getVectorNumElements(); 14787 assert(StVT != VT && "Cannot truncate to the same type"); 14788 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 14789 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 14790 14791 // From, To sizes and ElemCount must be pow of two 14792 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 14793 // We are going to use the original vector elt for storing. 14794 // Accumulated smaller vector elements must be a multiple of the store size. 
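// For example, a truncstore of v8i32 to v8i16 becomes a v16i16 shuffle that
// gathers the eight live i16 halves at the bottom of the register, followed
// by two i64 element extracts and stores.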
14795 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 14796 14797 unsigned SizeRatio = FromSz / ToSz; 14798 14799 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 14800 14801 // Create a type on which we perform the shuffle 14802 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 14803 StVT.getScalarType(), NumElems*SizeRatio); 14804 14805 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 14806 14807 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 14808 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 14809 for (unsigned i = 0; i != NumElems; ++i) 14810 ShuffleVec[i] = i * SizeRatio; 14811 14812 // Can't shuffle using an illegal type. 14813 if (!TLI.isTypeLegal(WideVecVT)) 14814 return SDValue(); 14815 14816 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 14817 DAG.getUNDEF(WideVecVT), 14818 &ShuffleVec[0]); 14819 // At this point all of the data is stored at the bottom of the 14820 // register. We now need to save it to mem. 14821 14822 // Find the largest store unit 14823 MVT StoreType = MVT::i8; 14824 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 14825 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 14826 MVT Tp = (MVT::SimpleValueType)tp; 14827 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 14828 StoreType = Tp; 14829 } 14830 14831 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 14832 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 14833 (64 <= NumElems * ToSz)) 14834 StoreType = MVT::f64; 14835 14836 // Bitcast the original vector into a vector of store-size units 14837 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 14838 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 14839 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 14840 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 14841 SmallVector<SDValue, 8> Chains; 14842 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 14843 TLI.getPointerTy()); 14844 SDValue Ptr = St->getBasePtr(); 14845 14846 // Perform one or more big stores into memory. 14847 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 14848 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 14849 StoreType, ShuffWide, 14850 DAG.getIntPtrConstant(i)); 14851 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 14852 St->getPointerInfo(), St->isVolatile(), 14853 St->isNonTemporal(), St->getAlignment()); 14854 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 14855 Chains.push_back(Ch); 14856 } 14857 14858 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 14859 Chains.size()); 14860 } 14861 14862 14863 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 14864 // the FP state in cases where an emms may be missing. 14865 // A preferable solution to the general problem is to figure out the right 14866 // places to insert EMMS. This qualifies as a quick hack. 14867 14868 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 
14869 if (VT.getSizeInBits() != 64) 14870 return SDValue(); 14871 14872 const Function *F = DAG.getMachineFunction().getFunction(); 14873 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); 14874 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 14875 && Subtarget->hasSSE2(); 14876 if ((VT.isVector() || 14877 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 14878 isa<LoadSDNode>(St->getValue()) && 14879 !cast<LoadSDNode>(St->getValue())->isVolatile() && 14880 St->getChain().hasOneUse() && !St->isVolatile()) { 14881 SDNode* LdVal = St->getValue().getNode(); 14882 LoadSDNode *Ld = 0; 14883 int TokenFactorIndex = -1; 14884 SmallVector<SDValue, 8> Ops; 14885 SDNode* ChainVal = St->getChain().getNode(); 14886 // Must be a store of a load. We currently handle two cases: the load 14887 // is a direct child, and it's under an intervening TokenFactor. It is 14888 // possible to dig deeper under nested TokenFactors. 14889 if (ChainVal == LdVal) 14890 Ld = cast<LoadSDNode>(St->getChain()); 14891 else if (St->getValue().hasOneUse() && 14892 ChainVal->getOpcode() == ISD::TokenFactor) { 14893 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 14894 if (ChainVal->getOperand(i).getNode() == LdVal) { 14895 TokenFactorIndex = i; 14896 Ld = cast<LoadSDNode>(St->getValue()); 14897 } else 14898 Ops.push_back(ChainVal->getOperand(i)); 14899 } 14900 } 14901 14902 if (!Ld || !ISD::isNormalLoad(Ld)) 14903 return SDValue(); 14904 14905 // If this is not the MMX case, i.e. we are just turning i64 load/store 14906 // into f64 load/store, avoid the transformation if there are multiple 14907 // uses of the loaded value. 14908 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 14909 return SDValue(); 14910 14911 DebugLoc LdDL = Ld->getDebugLoc(); 14912 DebugLoc StDL = N->getDebugLoc(); 14913 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 14914 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 14915 // pair instead. 14916 if (Subtarget->is64Bit() || F64IsLegal) { 14917 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 14918 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 14919 Ld->getPointerInfo(), Ld->isVolatile(), 14920 Ld->isNonTemporal(), Ld->isInvariant(), 14921 Ld->getAlignment()); 14922 SDValue NewChain = NewLd.getValue(1); 14923 if (TokenFactorIndex != -1) { 14924 Ops.push_back(NewChain); 14925 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 14926 Ops.size()); 14927 } 14928 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 14929 St->getPointerInfo(), 14930 St->isVolatile(), St->isNonTemporal(), 14931 St->getAlignment()); 14932 } 14933 14934 // Otherwise, lower to two pairs of 32-bit loads / stores. 
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(), Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
                             Ops.size());
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }
  return SDValue();
}

/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS.  A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector.  For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
/// Note that the binary operation should have the property that if one of the
/// operands is UNDEF then the result is UNDEF.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
  // Look for the following pattern: if
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  // At least one of the operands should be a vector shuffle.
  if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  EVT VT = LHS.getValueType();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");

  // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
  // operate independently on 128-bit lanes.
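  // For example, for v8f32 on AVX: NumElts = 8, NumLanes = 256/128 = 2,
  // NumLaneElts = 4 and HalfLaneElts = 2; the mask checks below are applied
  // within each 128-bit lane separately.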
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts / NumLanes;
  assert((NumLaneElts % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  unsigned HalfLaneElts = NumLaneElts/2;

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle then pretend it is the shuffle
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: in what follows a default initialized SDValue represents an UNDEF of
  // type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
  if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
      A = LHS.getOperand(0);
    if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
      B = LHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), LMask.begin());
  } else {
    if (LHS.getOpcode() != ISD::UNDEF)
      A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
  if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
      C = RHS.getOperand(0);
    if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
      D = RHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), RMask.begin());
  } else {
    if (RHS.getOpcode() != ISD::UNDEF)
      C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }

  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D) && !(A == D && B == C))
    return false;

  // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
  if (!A.getNode() && !B.getNode())
    return false;

  // If A and B occur in reverse order in RHS, then "swap" them (which means
  // rewriting the mask).
  if (A != C)
    CommuteVectorShuffleMask(RMask, NumElts);

  // At this point LHS and RHS are equivalent to
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  //   RHS = VECTOR_SHUFFLE A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  for (unsigned i = 0; i != NumElts; ++i) {
    int LIdx = LMask[i], RIdx = RMask[i];

    // Ignore any UNDEF components.
    if (LIdx < 0 || RIdx < 0 ||
        (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
        (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
      continue;

    // Check that successive elements are being operated on.  If not, this is
    // not a horizontal operation.
    unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs
    unsigned LaneStart = (i/NumLaneElts) * NumLaneElts;
    int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart;
    if (!(LIdx == Index && RIdx == Index + 1) &&
        !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
      return false;
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}

/// PerformFADDCombine - Do target-specific dag combines on floating point adds.
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, true))
    return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS);
  return SDValue();
}

/// PerformFSUBCombine - Do target-specific dag combines on floating point subs.
static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal subs from subs of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, false))
    return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS);
  return SDValue();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
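  // For example, with a 32-bit index operand only the low Log2_32(32) = 5 bits
  // are demanded: a bit index of 37 behaves exactly like 37 & 31 = 5, so the
  // index computation can be simplified under that mask.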
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}

static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
  }
  return SDValue();
}

static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasAVX())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  EVT OpVT = Op.getValueType();
  DebugLoc dl = N->getDebugLoc();

  if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) ||
      (VT == MVT::v8i32 && OpVT == MVT::v8i16)) {

    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op);

    // Optimize vectors in AVX mode: sign extend v8i16 to v8i32 and
    // v4i32 to v4i64.
    //
    // Divide the input vector into two parts; for v4i32 the shuffle masks
    // will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 }.  Then use a vpmovsx
    // instruction to extend each half (v4i32 -> v2i64, v8i16 -> v4i32) and
    // concat the results back to the original VT.

    unsigned NumElems = OpVT.getVectorNumElements();
    SmallVector<int,8> ShufMask1(NumElems, -1);
    for (unsigned i = 0; i != NumElems/2; ++i)
      ShufMask1[i] = i;

    SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
                                        &ShufMask1[0]);

    SmallVector<int,8> ShufMask2(NumElems, -1);
    for (unsigned i = 0; i != NumElems/2; ++i)
      ShufMask2[i] = i + NumElems/2;

    SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
                                        &ShufMask2[0]);

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                  VT.getVectorNumElements()/2);

    OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo);
    OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi);

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
  }
  return SDValue();
}

static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget* Subtarget) {
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasFMA())
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  bool NegA = (A.getOpcode() == ISD::FNEG);
  bool NegB = (B.getOpcode() == ISD::FNEG);
  bool NegC = (C.getOpcode() == ISD::FNEG);

  // The multiplication is negated when exactly one of NegA and NegB is set
  // (NegA xor NegB).
  bool NegMul = (NegA != NegB);
  if (NegA)
    A = A.getOperand(0);
  if (NegB)
    B = B.getOperand(0);
  if (NegC)
    C = C.getOperand(0);

  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
  return DAG.getNode(Opcode, dl, VT, A, B, C);
}

static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //   (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  DebugLoc dl = N->getDebugLoc();
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() != X86ISD::SETCC_CARRY)
      return SDValue();
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C || C->getZExtValue() != 1)
      return SDValue();
    return DAG.getNode(ISD::AND, dl, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                   N00.getOperand(0), N00.getOperand(1)),
                       DAG.getConstant(1, VT));
  }

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpunpcklwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpunpckldq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
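  //
  // For example, zero extending v8i16 x to v8i32 without AVX2: vpunpcklwd of
  // x with an all-zeros vector interleaves the low four words of x with zero
  // words, which after a bitcast to v4i32 is exactly the low four elements
  // zero extended; vpunpckhwd produces the high four, and the two halves are
  // then concatenated.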
  //
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasAVX())
    return SDValue();

  if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) ||
      ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) {

    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0);

    SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl);
    SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec);
    SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec);

    EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
                               VT.getVectorNumElements()/2);

    OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
  }

  return SDValue();
}

// Optimize  x == -y  -->  x+y == 0
//           x != -y  -->  x+y != 0
static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
                                   LHS.getValueType(), RHS, LHS.getOperand(1));
        return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
                                   RHS.getValueType(), LHS, RHS.getOperand(1));
        return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
  return SDValue();
}

// Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  DebugLoc DL = N->getDebugLoc();
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in some
  // cases.
  if (CC == X86::COND_B)
    return DAG.getNode(ISD::AND, DL, MVT::i8,
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(CC, MVT::i8), EFLAGS),
                       DAG.getConstant(1, MVT::i8));

  SDValue Flags;

  Flags = BoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
  }

  return SDValue();
}

// Optimize branch condition evaluation.
//
static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  DebugLoc DL = N->getDebugLoc();
  SDValue Chain = N->getOperand(0);
  SDValue Dest = N->getOperand(1);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  SDValue Flags;

  Flags = BoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                       Flags);
  }

  return SDValue();
}

static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op0 = N->getOperand(0);
  EVT InVT = Op0->getValueType(0);

  // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32))
  if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
    DebugLoc dl = N->getDebugLoc();
    MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
    // Notice that we use SINT_TO_FP because we know that the high bits
    // are zero and SINT_TO_FP is better supported by the hardware.
    return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
  }

  return SDValue();
}

static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86TargetLowering *XTLI) {
  SDValue Op0 = N->getOperand(0);
  EVT InVT = Op0->getValueType(0);

  // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
  if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
    DebugLoc dl = N->getDebugLoc();
    MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
  }

  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT VT = Ld->getValueType(0);
    if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !XTLI->getSubtarget()->is64Bit() &&
        !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
      SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
                                          Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}

static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE(v4i32 = FP_TO_SINT())
  if (VT == MVT::v8i8 || VT == MVT::v4i8) {
    DebugLoc dl = N->getDebugLoc();
    MVT DstVT = VT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
    SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, I);
  }

  return SDValue();
}

// Optimize  RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry", i.e. SETCC_CARRY & 1.
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this
      // when the flag result is dead.
      SDValue(N, 1).use_empty()) {
    DebugLoc DL = N->getDebugLoc();
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B,MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}

// fold (add Y, (sete  X, 0)) -> adc  0, Y
//      (add Y, (setne X, 0)) -> sbb -1, Y
//      (sub (sete  X, 0), Y) -> sbb  0, Y
//      (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  DebugLoc DL = N->getDebugLoc();

  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();

  SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = SetCC.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue CmpOp0 = Cmp.getOperand(0);
  SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
                               DAG.getConstant(1, CmpOp0.getValueType()));

  SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
  if (CC == X86::COND_NE)
    return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                       DL, OtherVal.getValueType(), OtherVal,
                       DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
  return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}

/// PerformAddCombine - Do target-specific dag combines on integer adds.
static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
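  // For example, for v4i32, if Op0 = shuffle<0,2,4,6>(A,B) and
  // Op1 = shuffle<1,3,5,7>(A,B), then Op0 + Op1 computes
  // < a0+a1, a2+a3, b0+b1, b2+b3 >, which is exactly PHADDD A, B.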
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}

static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, VT));
      return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue()+1, VT));
    }
  }

  // Try to synthesize horizontal subs from subs of shuffles.  Note that
  // subtraction is not commutative, so swapped element pairs must not match.
  EVT VT = N->getValueType(0);
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, false))
    return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
  case ISD::VSELECT:
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI);
  case ISD::ADD:            return PerformAddCombine(N, DAG, Subtarget);
  case ISD::SUB:            return PerformSubCombine(N, DAG, Subtarget);
  case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::XOR:            return PerformXorCombine(N, DAG, DCI, Subtarget);
  case ISD::LOAD:           return PerformLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::UINT_TO_FP:     return PerformUINT_TO_FPCombine(N, DAG);
  case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, this);
  case ISD::FP_TO_SINT:     return PerformFP_TO_SINTCombine(N, DAG);
  case ISD::FADD:           return PerformFADDCombine(N, DAG, Subtarget);
  case ISD::FSUB:           return PerformFSUBCombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND:    return PerformSExtCombine(N, DAG, DCI, Subtarget);
  case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI);
  case ISD::SETCC:          return PerformISDSETCCCombine(N, DAG);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG);
  case X86ISD::BRCOND:      return PerformBrCondCombine(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
  case X86ISD::PALIGN:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILP:
  case X86ISD::VPERM2X128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
  case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then it
    // might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote the LOAD (rather than it
        // being promoted as an operand) is when its only use is live out.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
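    // For example, a 16-bit shift whose input load and output store can both
    // fold may be selected as a single read-modify-write shift on memory;
    // promoting it to i32 would force separate load and store instructions.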
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

namespace {
  // Helper to match a string separated by whitespace.
  bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
    s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.

    for (unsigned i = 0, e = args.size(); i != e; ++i) {
      StringRef piece(*args[i]);
      if (!s.startswith(piece)) // Check if the piece matches.
        return false;

      s = s.substr(piece.size());
      StringRef::size_type pos = s.find_first_not_of(" \t");
      if (pos == 0) // We matched a prefix.
        return false;

      s = s.substr(pos);
    }

    return s.empty();
  }
  const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
}

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better.  If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm.  For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], "bswap", "$0") ||
        matchAsm(AsmPieces[0], "bswapl", "$0") ||
        matchAsm(AsmPieces[0], "bswapq", "$0") ||
        matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
         matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      std::sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}")
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
        matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
        matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      std::sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}")
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
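/// For example, constraint 'I' with a constant operand in [0, 31] yields
/// CW_Constant below, while constraint 'a' with an integer operand yields
/// CW_SpecificReg.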
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    // fallthrough
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x':
  case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
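  // For example, an 'X' constraint on a double operand becomes 'Y' when SSE2
  // is available and 'x' when only SSE1 is, keeping the value out of the x87
  // stack whenever an SSE register can hold it.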
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in certain
      // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
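    // For example, for (add (add GA, 4), 8) the loop below walks down the ADD
    // chain accumulating Offset = 12 and stops once it reaches the
    // GlobalAddressSDNode itself.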
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
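  // For example, "{st(3)}" is not recognized by the default mapper, so it is
  // matched explicitly below and mapped to X86::ST3 in the RFP80 class.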
  if (Res.second == 0) {
    // Map {st(0)} .. {st(7)} to ST0 .. ST7.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.
    // This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
  }

  return Res;
}