X86ISelLowering.cpp revision 983611836cd1edec8d1b8032e0539b6ed80461d6
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "x86-isel" 16#include "X86.h" 17#include "X86InstrBuilder.h" 18#include "X86ISelLowering.h" 19#include "X86ShuffleDecode.h" 20#include "X86TargetMachine.h" 21#include "X86TargetObjectFile.h" 22#include "llvm/CallingConv.h" 23#include "llvm/Constants.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/GlobalAlias.h" 26#include "llvm/GlobalVariable.h" 27#include "llvm/Function.h" 28#include "llvm/Instructions.h" 29#include "llvm/Intrinsics.h" 30#include "llvm/LLVMContext.h" 31#include "llvm/CodeGen/MachineFrameInfo.h" 32#include "llvm/CodeGen/MachineFunction.h" 33#include "llvm/CodeGen/MachineInstrBuilder.h" 34#include "llvm/CodeGen/MachineJumpTableInfo.h" 35#include "llvm/CodeGen/MachineModuleInfo.h" 36#include "llvm/CodeGen/MachineRegisterInfo.h" 37#include "llvm/CodeGen/PseudoSourceValue.h" 38#include "llvm/MC/MCAsmInfo.h" 39#include "llvm/MC/MCContext.h" 40#include "llvm/MC/MCExpr.h" 41#include "llvm/MC/MCSymbol.h" 42#include "llvm/ADT/BitVector.h" 43#include "llvm/ADT/SmallSet.h" 44#include "llvm/ADT/Statistic.h" 45#include "llvm/ADT/StringExtras.h" 46#include "llvm/ADT/VectorExtras.h" 47#include "llvm/Support/CommandLine.h" 48#include "llvm/Support/Debug.h" 49#include "llvm/Support/Dwarf.h" 50#include "llvm/Support/ErrorHandling.h" 51#include "llvm/Support/MathExtras.h" 52#include "llvm/Support/raw_ostream.h" 53using namespace llvm; 54using namespace dwarf; 55 56STATISTIC(NumTailCalls, "Number of tail calls"); 57 58static 
cl::opt<bool>
// Command-line escape hatch: when given, the backend avoids using MMX even if
// the subtarget reports MMX support (see the hasMMX() check later in the
// constructor).
DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// createTLOF - Select the TargetLoweringObjectFile implementation matching
/// the subtarget's object-file format (Mach-O, ELF, or COFF), using the
/// 64-bit-specific variant where one exists.  The returned object is handed
/// to the TargetLowering base-class constructor below.
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
    if (is64Bit) return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  } else if (TM.getSubtarget<X86Subtarget>().isTargetELF()) {
    if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
    return new X8632_ELFTargetObjectFile(TM);
  } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
    return new TargetLoweringObjectFileCOFF();
  }
  // No other object-file formats are supported for x86.
  llvm_unreachable("unknown subtarget type");
}

// Constructor: caches subtarget capability flags, then declares, for every
// (operation, value-type) pair, how the DAG legalizer must treat it on x86
// (Legal / Promote / Expand / Custom).
// NOTE(review): the constructor body continues well beyond this chunk.
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  // Scalar FP can be done in SSE registers when SSE2/SSE1 are available;
  // these flags steer the f64/f32 register-class setup later on.
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  // Stack pointer register differs between 64-bit and 32-bit mode.
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setBooleanContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Setup Windows compiler runtime calls.
101 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 102 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 103 setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2"); 104 setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2"); 105 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 106 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 107 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C); 108 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C); 109 } 110 111 if (Subtarget->isTargetDarwin()) { 112 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 113 setUseUnderscoreSetJmp(false); 114 setUseUnderscoreLongJmp(false); 115 } else if (Subtarget->isTargetMingw()) { 116 // MS runtime is weird: it exports _setjmp, but longjmp! 117 setUseUnderscoreSetJmp(true); 118 setUseUnderscoreLongJmp(false); 119 } else { 120 setUseUnderscoreSetJmp(true); 121 setUseUnderscoreLongJmp(true); 122 } 123 124 // Set up the register classes. 125 addRegisterClass(MVT::i8, X86::GR8RegisterClass); 126 addRegisterClass(MVT::i16, X86::GR16RegisterClass); 127 addRegisterClass(MVT::i32, X86::GR32RegisterClass); 128 if (Subtarget->is64Bit()) 129 addRegisterClass(MVT::i64, X86::GR64RegisterClass); 130 131 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 132 133 // We don't accept any truncstore of integer registers. 134 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 135 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 136 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 137 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 138 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 139 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 140 141 // SETOEQ and SETUNE require checking two conditions. 
142 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 143 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 144 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 145 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 146 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 147 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 148 149 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 150 // operation. 151 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 152 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 153 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 154 155 if (Subtarget->is64Bit()) { 156 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 157 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand); 158 } else if (!UseSoftFloat) { 159 // We have an algorithm for SSE2->double, and we turn this into a 160 // 64-bit FILD followed by conditional FADD for other targets. 161 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 162 // We have an algorithm for SSE2, and we turn this into a 64-bit 163 // FILD for other targets. 164 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 165 } 166 167 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 168 // this operation. 
169 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 170 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 171 172 if (!UseSoftFloat) { 173 // SSE has no i16 to fp conversion, only i32 174 if (X86ScalarSSEf32) { 175 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 176 // f32 and f64 cases are Legal, f80 case is not 177 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 178 } else { 179 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 180 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 181 } 182 } else { 183 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 184 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 185 } 186 187 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 188 // are Legal, f80 is custom lowered. 189 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 190 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 191 192 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 193 // this operation. 194 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 195 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 196 197 if (X86ScalarSSEf32) { 198 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 199 // f32 and f64 cases are Legal, f80 case is not 200 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 201 } else { 202 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 203 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 204 } 205 206 // Handle FP_TO_UINT by promoting the destination to a larger signed 207 // conversion. 
208 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 209 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 210 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 211 212 if (Subtarget->is64Bit()) { 213 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 214 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 215 } else if (!UseSoftFloat) { 216 if (X86ScalarSSEf32 && !Subtarget->hasSSE3()) 217 // Expand FP_TO_UINT into a select. 218 // FIXME: We would like to use a Custom expander here eventually to do 219 // the optimal thing for SSE vs. the default expansion in the legalizer. 220 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 221 else 222 // With SSE3 we can use fisttpll to convert to a signed i64; without 223 // SSE, we're stuck with a fistpll. 224 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 225 } 226 227 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 228 if (!X86ScalarSSEf64) { 229 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand); 230 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand); 231 if (Subtarget->is64Bit()) { 232 setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand); 233 // Without SSE, i64->f64 goes through memory. 234 setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand); 235 } 236 } 237 238 // Scalar integer divide and remainder are lowered to use operations that 239 // produce two results, to match the available instructions. This exposes 240 // the two-result form to trivial CSE, which is able to combine x/y and x%y 241 // into a single instruction. 242 // 243 // Scalar integer multiply-high is also lowered to use two-result 244 // operations, to match the available instructions. However, plain multiply 245 // (low) operations are left as Legal, as there are single-result 246 // instructions for this in x86. Using the two-result multiply instructions 247 // when both high and low results are needed must be arranged by dagcombine. 
248 setOperationAction(ISD::MULHS , MVT::i8 , Expand); 249 setOperationAction(ISD::MULHU , MVT::i8 , Expand); 250 setOperationAction(ISD::SDIV , MVT::i8 , Expand); 251 setOperationAction(ISD::UDIV , MVT::i8 , Expand); 252 setOperationAction(ISD::SREM , MVT::i8 , Expand); 253 setOperationAction(ISD::UREM , MVT::i8 , Expand); 254 setOperationAction(ISD::MULHS , MVT::i16 , Expand); 255 setOperationAction(ISD::MULHU , MVT::i16 , Expand); 256 setOperationAction(ISD::SDIV , MVT::i16 , Expand); 257 setOperationAction(ISD::UDIV , MVT::i16 , Expand); 258 setOperationAction(ISD::SREM , MVT::i16 , Expand); 259 setOperationAction(ISD::UREM , MVT::i16 , Expand); 260 setOperationAction(ISD::MULHS , MVT::i32 , Expand); 261 setOperationAction(ISD::MULHU , MVT::i32 , Expand); 262 setOperationAction(ISD::SDIV , MVT::i32 , Expand); 263 setOperationAction(ISD::UDIV , MVT::i32 , Expand); 264 setOperationAction(ISD::SREM , MVT::i32 , Expand); 265 setOperationAction(ISD::UREM , MVT::i32 , Expand); 266 setOperationAction(ISD::MULHS , MVT::i64 , Expand); 267 setOperationAction(ISD::MULHU , MVT::i64 , Expand); 268 setOperationAction(ISD::SDIV , MVT::i64 , Expand); 269 setOperationAction(ISD::UDIV , MVT::i64 , Expand); 270 setOperationAction(ISD::SREM , MVT::i64 , Expand); 271 setOperationAction(ISD::UREM , MVT::i64 , Expand); 272 273 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 274 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 275 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 276 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 277 if (Subtarget->is64Bit()) 278 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 279 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 280 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 281 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 282 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 283 setOperationAction(ISD::FREM , MVT::f32 , Expand); 284 
setOperationAction(ISD::FREM , MVT::f64 , Expand); 285 setOperationAction(ISD::FREM , MVT::f80 , Expand); 286 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 287 288 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 289 setOperationAction(ISD::CTTZ , MVT::i8 , Custom); 290 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 291 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 292 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 293 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 294 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 295 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 296 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 297 if (Subtarget->is64Bit()) { 298 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 299 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 300 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 301 } 302 303 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 304 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 305 306 // These should be promoted to a larger select which is supported. 307 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 308 // X86 wants to expand cmov itself. 
309 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 310 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 311 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 312 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 313 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 314 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 315 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 316 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 317 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 318 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 319 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 320 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 321 if (Subtarget->is64Bit()) { 322 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 323 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 324 } 325 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 326 327 // Darwin ABI issue. 328 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 329 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 330 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 331 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 332 if (Subtarget->is64Bit()) 333 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 334 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 335 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom); 336 if (Subtarget->is64Bit()) { 337 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 338 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 339 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 340 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 341 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom); 342 } 343 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 344 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 345 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 346 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 347 if 
(Subtarget->is64Bit()) { 348 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom); 349 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom); 350 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom); 351 } 352 353 if (Subtarget->hasSSE1()) 354 setOperationAction(ISD::PREFETCH , MVT::Other, Legal); 355 356 // We may not have a libcall for MEMBARRIER so we should lower this. 357 setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); 358 359 // On X86 and X86-64, atomic operations are lowered to locked instructions. 360 // Locked instructions, in turn, have implicit fence semantics (all memory 361 // operations are flushed before issuing the locked instruction, and they 362 // are not buffered), so we can fold away the common pattern of 363 // fence-atomic-fence. 364 setShouldFoldAtomicFences(true); 365 366 // Expand certain atomics 367 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom); 368 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom); 369 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 370 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 371 372 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom); 373 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom); 374 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); 375 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 376 377 if (!Subtarget->is64Bit()) { 378 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 379 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 380 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 381 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 382 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 383 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 384 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 385 } 386 387 // FIXME - use subtarget debug flags 388 if (!Subtarget->isTargetDarwin() && 389 !Subtarget->isTargetELF() && 390 
!Subtarget->isTargetCygMing()) { 391 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 392 } 393 394 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 395 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 396 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 397 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 398 if (Subtarget->is64Bit()) { 399 setExceptionPointerRegister(X86::RAX); 400 setExceptionSelectorRegister(X86::RDX); 401 } else { 402 setExceptionPointerRegister(X86::EAX); 403 setExceptionSelectorRegister(X86::EDX); 404 } 405 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 406 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 407 408 setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom); 409 410 setOperationAction(ISD::TRAP, MVT::Other, Legal); 411 412 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 413 setOperationAction(ISD::VASTART , MVT::Other, Custom); 414 setOperationAction(ISD::VAEND , MVT::Other, Expand); 415 if (Subtarget->is64Bit()) { 416 setOperationAction(ISD::VAARG , MVT::Other, Custom); 417 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 418 } else { 419 setOperationAction(ISD::VAARG , MVT::Other, Expand); 420 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 421 } 422 423 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 424 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 425 if (Subtarget->is64Bit()) 426 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); 427 if (Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) 428 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 429 else 430 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 431 432 if (!UseSoftFloat && X86ScalarSSEf64) { 433 // f32 and f64 use SSE. 434 // Set up the FP register classes. 
435 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 436 addRegisterClass(MVT::f64, X86::FR64RegisterClass); 437 438 // Use ANDPD to simulate FABS. 439 setOperationAction(ISD::FABS , MVT::f64, Custom); 440 setOperationAction(ISD::FABS , MVT::f32, Custom); 441 442 // Use XORP to simulate FNEG. 443 setOperationAction(ISD::FNEG , MVT::f64, Custom); 444 setOperationAction(ISD::FNEG , MVT::f32, Custom); 445 446 // Use ANDPD and ORPD to simulate FCOPYSIGN. 447 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 448 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 449 450 // We don't support sin/cos/fmod 451 setOperationAction(ISD::FSIN , MVT::f64, Expand); 452 setOperationAction(ISD::FCOS , MVT::f64, Expand); 453 setOperationAction(ISD::FSIN , MVT::f32, Expand); 454 setOperationAction(ISD::FCOS , MVT::f32, Expand); 455 456 // Expand FP immediates into loads from the stack, except for the special 457 // cases we handle. 458 addLegalFPImmediate(APFloat(+0.0)); // xorpd 459 addLegalFPImmediate(APFloat(+0.0f)); // xorps 460 } else if (!UseSoftFloat && X86ScalarSSEf32) { 461 // Use SSE for f32, x87 for f64. 462 // Set up the FP register classes. 463 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 464 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 465 466 // Use ANDPS to simulate FABS. 467 setOperationAction(ISD::FABS , MVT::f32, Custom); 468 469 // Use XORP to simulate FNEG. 470 setOperationAction(ISD::FNEG , MVT::f32, Custom); 471 472 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 473 474 // Use ANDPS and ORPS to simulate FCOPYSIGN. 475 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 476 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 477 478 // We don't support sin/cos/fmod 479 setOperationAction(ISD::FSIN , MVT::f32, Expand); 480 setOperationAction(ISD::FCOS , MVT::f32, Expand); 481 482 // Special cases we handle for FP constants. 
483 addLegalFPImmediate(APFloat(+0.0f)); // xorps 484 addLegalFPImmediate(APFloat(+0.0)); // FLD0 485 addLegalFPImmediate(APFloat(+1.0)); // FLD1 486 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 487 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 488 489 if (!UnsafeFPMath) { 490 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 491 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 492 } 493 } else if (!UseSoftFloat) { 494 // f32 and f64 in x87. 495 // Set up the FP register classes. 496 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 497 addRegisterClass(MVT::f32, X86::RFP32RegisterClass); 498 499 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 500 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 501 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 502 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 503 504 if (!UnsafeFPMath) { 505 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 506 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 507 } 508 addLegalFPImmediate(APFloat(+0.0)); // FLD0 509 addLegalFPImmediate(APFloat(+1.0)); // FLD1 510 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 511 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 512 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 513 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 514 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 515 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 516 } 517 518 // Long double always uses X87. 
519 if (!UseSoftFloat) { 520 addRegisterClass(MVT::f80, X86::RFP80RegisterClass); 521 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 522 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 523 { 524 bool ignored; 525 APFloat TmpFlt(+0.0); 526 TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 527 &ignored); 528 addLegalFPImmediate(TmpFlt); // FLD0 529 TmpFlt.changeSign(); 530 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 531 APFloat TmpFlt2(+1.0); 532 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 533 &ignored); 534 addLegalFPImmediate(TmpFlt2); // FLD1 535 TmpFlt2.changeSign(); 536 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 537 } 538 539 if (!UnsafeFPMath) { 540 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 541 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 542 } 543 } 544 545 // Always use a library call for pow. 546 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 547 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 548 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 549 550 setOperationAction(ISD::FLOG, MVT::f80, Expand); 551 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 552 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 553 setOperationAction(ISD::FEXP, MVT::f80, Expand); 554 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 555 556 // First set operation action for all vector types to either promote 557 // (for widening) or expand (for scalarization). Then we will selectively 558 // turn on ones that can be effectively codegen'd. 
559 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 560 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 561 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 562 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 563 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 564 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 565 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 566 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 567 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 568 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 569 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 570 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 571 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 572 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 573 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 574 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 575 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 576 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 577 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 578 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 579 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 580 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 581 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 582 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 583 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 584 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 585 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 586 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 587 
setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 588 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 589 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 590 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 591 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 592 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 593 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 594 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 595 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 596 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 597 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 598 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 599 setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand); 600 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 601 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 602 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 603 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 604 setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand); 605 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 606 setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 607 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 608 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 609 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 610 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 611 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 612 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 613 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 614 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 
615 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 616 setTruncStoreAction((MVT::SimpleValueType)VT, 617 (MVT::SimpleValueType)InnerVT, Expand); 618 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 619 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 620 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 621 } 622 623 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 624 // with -msoft-float, disable use of MMX as well. 625 if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) { 626 addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass); 627 // No operations on x86mmx supported, everything uses intrinsics. 628 } 629 630 // MMX-sized vectors (other than x86mmx) are expected to be expanded 631 // into smaller operations. 632 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 633 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 634 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 635 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 636 setOperationAction(ISD::AND, MVT::v8i8, Expand); 637 setOperationAction(ISD::AND, MVT::v4i16, Expand); 638 setOperationAction(ISD::AND, MVT::v2i32, Expand); 639 setOperationAction(ISD::AND, MVT::v1i64, Expand); 640 setOperationAction(ISD::OR, MVT::v8i8, Expand); 641 setOperationAction(ISD::OR, MVT::v4i16, Expand); 642 setOperationAction(ISD::OR, MVT::v2i32, Expand); 643 setOperationAction(ISD::OR, MVT::v1i64, Expand); 644 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 645 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 646 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 647 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 648 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 649 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 650 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 651 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 652 setOperationAction(ISD::INSERT_VECTOR_ELT, 
MVT::v1i64, Expand); 653 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 654 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 655 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 656 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 657 setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Expand); 658 setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Expand); 659 setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Expand); 660 setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Expand); 661 662 if (!UseSoftFloat && Subtarget->hasSSE1()) { 663 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); 664 665 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 666 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 667 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 668 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 669 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 670 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 671 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 672 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 673 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 674 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 675 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 676 setOperationAction(ISD::VSETCC, MVT::v4f32, Custom); 677 } 678 679 if (!UseSoftFloat && Subtarget->hasSSE2()) { 680 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); 681 682 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 683 // registers cannot be used even for integer operations. 
684 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); 685 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); 686 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); 687 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); 688 689 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 690 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 691 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 692 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 693 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 694 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 695 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 696 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 697 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 698 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 699 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 700 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 701 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 702 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 703 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 704 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 705 706 setOperationAction(ISD::VSETCC, MVT::v2f64, Custom); 707 setOperationAction(ISD::VSETCC, MVT::v16i8, Custom); 708 setOperationAction(ISD::VSETCC, MVT::v8i16, Custom); 709 setOperationAction(ISD::VSETCC, MVT::v4i32, Custom); 710 711 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 712 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 713 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 714 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 715 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 716 717 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom); 718 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom); 719 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom); 720 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom); 721 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); 
722 723 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 724 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) { 725 EVT VT = (MVT::SimpleValueType)i; 726 // Do not attempt to custom lower non-power-of-2 vectors 727 if (!isPowerOf2_32(VT.getVectorNumElements())) 728 continue; 729 // Do not attempt to custom lower non-128-bit vectors 730 if (!VT.is128BitVector()) 731 continue; 732 setOperationAction(ISD::BUILD_VECTOR, 733 VT.getSimpleVT().SimpleTy, Custom); 734 setOperationAction(ISD::VECTOR_SHUFFLE, 735 VT.getSimpleVT().SimpleTy, Custom); 736 setOperationAction(ISD::EXTRACT_VECTOR_ELT, 737 VT.getSimpleVT().SimpleTy, Custom); 738 } 739 740 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 741 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 742 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 743 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 744 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 745 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 746 747 if (Subtarget->is64Bit()) { 748 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 749 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 750 } 751 752 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 
753 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) { 754 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 755 EVT VT = SVT; 756 757 // Do not attempt to promote non-128-bit vectors 758 if (!VT.is128BitVector()) 759 continue; 760 761 setOperationAction(ISD::AND, SVT, Promote); 762 AddPromotedToType (ISD::AND, SVT, MVT::v2i64); 763 setOperationAction(ISD::OR, SVT, Promote); 764 AddPromotedToType (ISD::OR, SVT, MVT::v2i64); 765 setOperationAction(ISD::XOR, SVT, Promote); 766 AddPromotedToType (ISD::XOR, SVT, MVT::v2i64); 767 setOperationAction(ISD::LOAD, SVT, Promote); 768 AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64); 769 setOperationAction(ISD::SELECT, SVT, Promote); 770 AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64); 771 } 772 773 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 774 775 // Custom lower v2i64 and v2f64 selects. 776 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 777 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 778 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 779 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 780 781 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 782 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 783 } 784 785 if (Subtarget->hasSSE41()) { 786 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 787 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 788 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 789 setOperationAction(ISD::FRINT, MVT::f32, Legal); 790 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 791 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 792 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 793 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 794 setOperationAction(ISD::FRINT, MVT::f64, Legal); 795 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 796 797 // FIXME: Do we need to handle scalar-to-vector here? 798 setOperationAction(ISD::MUL, MVT::v4i32, Legal); 799 800 // Can turn SHL into an integer multiply. 
801 setOperationAction(ISD::SHL, MVT::v4i32, Custom); 802 setOperationAction(ISD::SHL, MVT::v16i8, Custom); 803 804 // i8 and i16 vectors are custom , because the source register and source 805 // source memory operand types are not the same width. f32 vectors are 806 // custom since the immediate controlling the insert encodes additional 807 // information. 808 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); 809 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 810 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 811 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 812 813 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom); 814 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom); 815 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom); 816 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 817 818 if (Subtarget->is64Bit()) { 819 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal); 820 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal); 821 } 822 } 823 824 if (Subtarget->hasSSE42()) { 825 setOperationAction(ISD::VSETCC, MVT::v2i64, Custom); 826 } 827 828 if (!UseSoftFloat && Subtarget->hasAVX()) { 829 addRegisterClass(MVT::v8f32, X86::VR256RegisterClass); 830 addRegisterClass(MVT::v4f64, X86::VR256RegisterClass); 831 addRegisterClass(MVT::v8i32, X86::VR256RegisterClass); 832 addRegisterClass(MVT::v4i64, X86::VR256RegisterClass); 833 addRegisterClass(MVT::v32i8, X86::VR256RegisterClass); 834 835 setOperationAction(ISD::LOAD, MVT::v8f32, Legal); 836 setOperationAction(ISD::LOAD, MVT::v8i32, Legal); 837 setOperationAction(ISD::LOAD, MVT::v4f64, Legal); 838 setOperationAction(ISD::LOAD, MVT::v4i64, Legal); 839 setOperationAction(ISD::FADD, MVT::v8f32, Legal); 840 setOperationAction(ISD::FSUB, MVT::v8f32, Legal); 841 setOperationAction(ISD::FMUL, MVT::v8f32, Legal); 842 setOperationAction(ISD::FDIV, MVT::v8f32, Legal); 843 
setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); 844 setOperationAction(ISD::FNEG, MVT::v8f32, Custom); 845 setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom); 846 //setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Custom); 847 //setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom); 848 //setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 849 //setOperationAction(ISD::VSETCC, MVT::v8f32, Custom); 850 851 // Operations to consider commented out -v16i16 v32i8 852 //setOperationAction(ISD::ADD, MVT::v16i16, Legal); 853 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 854 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 855 //setOperationAction(ISD::SUB, MVT::v32i8, Legal); 856 //setOperationAction(ISD::SUB, MVT::v16i16, Legal); 857 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 858 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 859 //setOperationAction(ISD::MUL, MVT::v16i16, Legal); 860 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 861 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 862 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 863 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 864 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 865 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 866 867 setOperationAction(ISD::VSETCC, MVT::v4f64, Custom); 868 // setOperationAction(ISD::VSETCC, MVT::v32i8, Custom); 869 // setOperationAction(ISD::VSETCC, MVT::v16i16, Custom); 870 setOperationAction(ISD::VSETCC, MVT::v8i32, Custom); 871 872 // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i8, Custom); 873 // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i16, Custom); 874 // setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i16, Custom); 875 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i32, Custom); 876 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f32, Custom); 877 878 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom); 879 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i64, Custom); 880 
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom); 881 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i64, Custom); 882 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Custom); 883 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Custom); 884 885#if 0 886 // Not sure we want to do this since there are no 256-bit integer 887 // operations in AVX 888 889 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 890 // This includes 256-bit vectors 891 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; ++i) { 892 EVT VT = (MVT::SimpleValueType)i; 893 894 // Do not attempt to custom lower non-power-of-2 vectors 895 if (!isPowerOf2_32(VT.getVectorNumElements())) 896 continue; 897 898 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 899 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 900 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 901 } 902 903 if (Subtarget->is64Bit()) { 904 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i64, Custom); 905 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i64, Custom); 906 } 907#endif 908 909#if 0 910 // Not sure we want to do this since there are no 256-bit integer 911 // operations in AVX 912 913 // Promote v32i8, v16i16, v8i32 load, select, and, or, xor to v4i64. 
914 // Including 256-bit vectors 915 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; i++) { 916 EVT VT = (MVT::SimpleValueType)i; 917 918 if (!VT.is256BitVector()) { 919 continue; 920 } 921 setOperationAction(ISD::AND, VT, Promote); 922 AddPromotedToType (ISD::AND, VT, MVT::v4i64); 923 setOperationAction(ISD::OR, VT, Promote); 924 AddPromotedToType (ISD::OR, VT, MVT::v4i64); 925 setOperationAction(ISD::XOR, VT, Promote); 926 AddPromotedToType (ISD::XOR, VT, MVT::v4i64); 927 setOperationAction(ISD::LOAD, VT, Promote); 928 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64); 929 setOperationAction(ISD::SELECT, VT, Promote); 930 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64); 931 } 932 933 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 934#endif 935 } 936 937 // We want to custom lower some of our intrinsics. 938 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 939 940 // Add/Sub/Mul with overflow operations are custom lowered. 941 setOperationAction(ISD::SADDO, MVT::i32, Custom); 942 setOperationAction(ISD::UADDO, MVT::i32, Custom); 943 setOperationAction(ISD::SSUBO, MVT::i32, Custom); 944 setOperationAction(ISD::USUBO, MVT::i32, Custom); 945 setOperationAction(ISD::SMULO, MVT::i32, Custom); 946 947 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 948 // handle type legalization for these operations here. 949 // 950 // FIXME: We really should do custom legalization for addition and 951 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 952 // than generic legalization for 64-bit multiplication-with-overflow, though. 
  // 64-bit overflow arithmetic: only custom-lower the i64 forms on x86-64,
  // where i64 is legal; on x86-32 type legalization would have to split them
  // first (see the FIXME above about PR3203).
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SADDO, MVT::i64, Custom);
    setOperationAction(ISD::UADDO, MVT::i64, Custom);
    setOperationAction(ISD::SSUBO, MVT::i64, Custom);
    setOperationAction(ISD::USUBO, MVT::i64, Custom);
    setOperationAction(ISD::SMULO, MVT::i64, Custom);
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
  setPrefLoopAlignment(16);
  benefitFromCodePlacementOpt = true;
}


// All x86 condition-code results are produced as an i8 value (the setcc
// family of instructions writes a byte register), regardless of the
// operand type being compared.
MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
1001static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) { 1002 if (MaxAlign == 16) 1003 return; 1004 if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1005 if (VTy->getBitWidth() == 128) 1006 MaxAlign = 16; 1007 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1008 unsigned EltAlign = 0; 1009 getMaxByValAlign(ATy->getElementType(), EltAlign); 1010 if (EltAlign > MaxAlign) 1011 MaxAlign = EltAlign; 1012 } else if (const StructType *STy = dyn_cast<StructType>(Ty)) { 1013 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1014 unsigned EltAlign = 0; 1015 getMaxByValAlign(STy->getElementType(i), EltAlign); 1016 if (EltAlign > MaxAlign) 1017 MaxAlign = EltAlign; 1018 if (MaxAlign == 16) 1019 break; 1020 } 1021 } 1022 return; 1023} 1024 1025/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1026/// function arguments in the caller parameter area. For X86, aggregates 1027/// that contain SSE vectors are placed at 16-byte boundaries while the rest 1028/// are at 4-byte boundaries. 1029unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const { 1030 if (Subtarget->is64Bit()) { 1031 // Max of 8 and alignment of type. 1032 unsigned TyAlign = TD->getABITypeAlignment(Ty); 1033 if (TyAlign > 8) 1034 return TyAlign; 1035 return 8; 1036 } 1037 1038 unsigned Align = 4; 1039 if (Subtarget->hasSSE1()) 1040 getMaxByValAlign(Ty, Align); 1041 return Align; 1042} 1043 1044/// getOptimalMemOpType - Returns the target specific optimal type for load 1045/// and store operations as a result of memset, memcpy, and memmove 1046/// lowering. If DstAlign is zero that means it's safe to destination 1047/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 1048/// means there isn't a need to check it against alignment requirement, 1049/// probably because the source does not need to be loaded. 
If 1050/// 'NonScalarIntSafe' is true, that means it's safe to return a 1051/// non-scalar-integer type, e.g. empty string source, constant, or loaded 1052/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 1053/// constant so it does not need to be loaded. 1054/// It returns EVT::Other if the type should be determined using generic 1055/// target-independent logic. 1056EVT 1057X86TargetLowering::getOptimalMemOpType(uint64_t Size, 1058 unsigned DstAlign, unsigned SrcAlign, 1059 bool NonScalarIntSafe, 1060 bool MemcpyStrSrc, 1061 MachineFunction &MF) const { 1062 // FIXME: This turns off use of xmm stores for memset/memcpy on targets like 1063 // linux. This is because the stack realignment code can't handle certain 1064 // cases like PR2962. This should be removed when PR2962 is fixed. 1065 const Function *F = MF.getFunction(); 1066 if (NonScalarIntSafe && 1067 !F->hasFnAttr(Attribute::NoImplicitFloat)) { 1068 if (Size >= 16 && 1069 (Subtarget->isUnalignedMemAccessFast() || 1070 ((DstAlign == 0 || DstAlign >= 16) && 1071 (SrcAlign == 0 || SrcAlign >= 16))) && 1072 Subtarget->getStackAlignment() >= 16) { 1073 if (Subtarget->hasSSE2()) 1074 return MVT::v4i32; 1075 if (Subtarget->hasSSE1()) 1076 return MVT::v4f32; 1077 } else if (!MemcpyStrSrc && Size >= 8 && 1078 !Subtarget->is64Bit() && 1079 Subtarget->getStackAlignment() >= 8 && 1080 Subtarget->hasSSE2()) { 1081 // Do not use f64 to lower memcpy if source is string constant. It's 1082 // better to use i32 to avoid the loads. 1083 return MVT::f64; 1084 } 1085 } 1086 if (Subtarget->is64Bit() && Size >= 8) 1087 return MVT::i64; 1088 return MVT::i32; 1089} 1090 1091/// getJumpTableEncoding - Return the entry encoding for a jump table in the 1092/// current function. The returned value is a member of the 1093/// MachineJumpTableInfo::JTEntryKind enum. 
1094unsigned X86TargetLowering::getJumpTableEncoding() const { 1095 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF 1096 // symbol. 1097 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1098 Subtarget->isPICStyleGOT()) 1099 return MachineJumpTableInfo::EK_Custom32; 1100 1101 // Otherwise, use the normal jump table encoding heuristics. 1102 return TargetLowering::getJumpTableEncoding(); 1103} 1104 1105/// getPICBaseSymbol - Return the X86-32 PIC base. 1106MCSymbol * 1107X86TargetLowering::getPICBaseSymbol(const MachineFunction *MF, 1108 MCContext &Ctx) const { 1109 const MCAsmInfo &MAI = *getTargetMachine().getMCAsmInfo(); 1110 return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+ 1111 Twine(MF->getFunctionNumber())+"$pb"); 1112} 1113 1114 1115const MCExpr * 1116X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 1117 const MachineBasicBlock *MBB, 1118 unsigned uid,MCContext &Ctx) const{ 1119 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1120 Subtarget->isPICStyleGOT()); 1121 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF 1122 // entries. 1123 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1124 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1125} 1126 1127/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 1128/// jumptable. 1129SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1130 SelectionDAG &DAG) const { 1131 if (!Subtarget->is64Bit()) 1132 // This doesn't have DebugLoc associated with it, but is not really the 1133 // same as a Register. 1134 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); 1135 return Table; 1136} 1137 1138/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1139/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1140/// MCExpr. 
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(getPICBaseSymbol(MF, Ctx), Ctx);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
/// 2^4 = 16 bytes normally; no extra alignment (2^0) when optimizing for
/// size.
unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
  return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}

// findRepresentativeClass - Map a value type to the register class used to
// estimate register pressure for it (see TargetLowering).
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(EVT VT) const{
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = (Subtarget->is64Bit()
           ? X86::GR64RegisterClass : X86::GR32RegisterClass);
    break;
  case MVT::x86mmx:
    RRC = X86::VR64RegisterClass;
    break;
  // NOTE(review): the 256-bit types below (v32i8..v4f64) are registered in
  // VR256 when AVX is enabled (see the constructor) but are mapped to VR128
  // here — confirm whether they should report VR256 instead.
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = X86::VR128RegisterClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

// getRegPressureLimit - Rough per-class register budgets; one register is
// subtracted from the GR classes when a frame pointer is reserved.
unsigned
X86TargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  unsigned FPDiff = RegInfo->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 8 - FPDiff;
  case X86::VR128RegClassID:
    // x86-64 has XMM0-XMM15; x86-32 only XMM0-XMM7.
    return Subtarget->is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

// getStackCookieLocation - Report the TLS slot holding the stack-protector
// cookie on Linux (segment-register address spaces 256 = %gs, 257 = %fs).
bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
    Offset = 0x28;
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
      AddressSpace = 256;
    else
      AddressSpace = 257;
  } else {
    // %gs:0x14 on i386
    Offset = 0x14;
    AddressSpace = 256;
  }
  return true;
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

// CanLowerReturn - Check whether the return values fit the return-register
// constraints of the RetCC_X86 calling convention.
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

// LowerReturn - Lower a function return: copy the return values into their
// assigned registers and emit an X86ISD::RET_FLAG node.
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  // Add the regs to the liveout set for the function.
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  for (unsigned i = 0; i != RVLocs.size(); ++i)
    if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg()))
      MRI.addLiveOut(RVLocs[i].getLocReg());

  SDValue Flag;

  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                         MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }
    // Likewise we can't return F64 values with SSE1 only. gcc does so, but
    // llvm-gcc has never done it right and no one has noticed, so this
    // should be OK for now.
    if (ValVT == MVT::f64 &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
      report_fatal_error("SSE2 register return with SSE2 disabled");

    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
    // the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::ST0 ||
        VA.getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget->is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget->hasSSE2())
            ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy);
        }
      }
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax.
  if (Subtarget->is64Bit() &&
      DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments().");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());

    Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX now acts like a return value.
    MRI.addLiveOut(X86::RAX);
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, dl,
                     MVT::Other, &RetOps[0], RetOps.size());
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget->is64Bit();
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getValVT();

    // If this is x86-64, and we disabled SSE, we can't return FP values
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }

    SDValue Val;

    // If this is a call to a function that returns an fp value on the floating
    // point stack, we must guarantee the value is popped from the stack, so
    // a CopyFromReg is not good enough - the copy instruction may be eliminated
    // if the return value is not used. We use the FpGET_ST0 instructions
    // instead.
    if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
      // If we prefer to use the value in xmm registers, copy it out as f80 and
      // use a truncate to move it from fp stack reg to xmm reg.
      if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
      bool isST0 = VA.getLocReg() == X86::ST0;
      unsigned Opc = 0;
      if (CopyVT == MVT::f32) Opc = isST0 ? X86::FpGET_ST0_32:X86::FpGET_ST1_32;
      if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64;
      if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80;
      SDValue Ops[] = { Chain, InFlag };
      Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag,
                                         Ops, 2), 1);
      Val = Chain.getValue(0);

      // Round the f80 to the right size, which also moves it to the appropriate
      // xmm register.
      if (CopyVT != VA.getValVT())
        Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                          // This truncation won't change the value.
                          DAG.getIntPtrConstant(1));
    } else if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
      // For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
      if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
        Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                                   MVT::v2i64, InFlag).getValue(1);
        Val = Chain.getValue(0);
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
                          Val, DAG.getConstant(0, MVT::i64));
      } else {
        Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                                   MVT::i64, InFlag).getValue(1);
        Val = Chain.getValue(0);
      }
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val);
    } else {
      // Ordinary case: straight copy out of the physical register.
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                                 CopyVT, InFlag).getValue(1);
      Val = Chain.getValue(0);
    }
    InFlag = Chain.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
// StdCall calling
convention seems to be standard for many Windows' API 1435// routines and around. It differs from C calling convention just a little: 1436// callee should clean up the stack, not caller. Symbols should be also 1437// decorated in some fancy way :) It doesn't support any vector arguments. 1438// For info on fast calling convention see Fast Calling Convention (tail call) 1439// implementation LowerX86_32FastCCCallTo. 1440 1441/// CallIsStructReturn - Determines whether a call uses struct return 1442/// semantics. 1443static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1444 if (Outs.empty()) 1445 return false; 1446 1447 return Outs[0].Flags.isSRet(); 1448} 1449 1450/// ArgsAreStructReturn - Determines whether a function uses struct 1451/// return semantics. 1452static bool 1453ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1454 if (Ins.empty()) 1455 return false; 1456 1457 return Ins[0].Flags.isSRet(); 1458} 1459 1460/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1461/// by "Src" to address "Dst" with size and alignment information specified by 1462/// the specific parameter attribute. The copy will be passed as a byval 1463/// function parameter. 1464static SDValue 1465CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1466 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1467 DebugLoc dl) { 1468 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1469 1470 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1471 /*isVolatile*/false, /*AlwaysInline=*/true, 1472 MachinePointerInfo(), MachinePointerInfo()); 1473} 1474 1475/// IsTailCallConvention - Return true if the calling convention is one that 1476/// supports tail call optimization. 
static bool IsTailCallConvention(CallingConv::ID CC) {
  // Only fastcc and the GHC convention guarantee tail calls.
  return (CC == CallingConv::Fast || CC == CallingConv::GHC);
}

/// FuncIsMadeTailCallSafe - Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
  // Requires both the -tailcallopt flag and a tail-call-capable convention.
  return GuaranteedTailCallOpt && IsTailCallConvention(CC);
}

// LowerMemArgument - Lower a single stack-passed incoming argument: create
// (or reuse) its fixed stack slot and, for non-byval arguments, load the
// value out of it.
SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain,
                                    CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo *MFI,
                                    unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;

  // If value is passed by pointer we have address passed instead of the value
  // itself.
  if (VA.getLocInfo() == CCValAssign::Indirect)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can be
  // changed with more analysis.
  // In case of tail call optimization mark all arguments mutable. Since they
  // could be overwritten by lowering of arguments in case of a tail call.
  if (Flags.isByVal()) {
    // Byval: hand back the address of the slot; the caller's copy already
    // lives there.
    int FI = MFI->CreateFixedObject(Flags.getByValSize(),
                                    VA.getLocMemOffset(), isImmutable);
    return DAG.getFrameIndex(FI, getPointerTy());
  } else {
    // Ordinary stack argument: load the value from its fixed slot.
    int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
                                    VA.getLocMemOffset(), isImmutable);
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    return DAG.getLoad(ValVT, dl, Chain, FIN,
                       MachinePointerInfo::getFixedStack(FI),
                       false, false, 0);
  }
}

// LowerFormalArguments - Lower the incoming arguments of the function being
// compiled into registers/loads in the entry block.
SDValue
X86TargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv,
                                        bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                        DebugLoc dl,
                                        SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  // Cygwin/MinGW "main" needs a frame pointer so the runtime can align the
  // stack.
  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isTargetWin64();

  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
         "Var args not supported with calling convention fastcc or ghc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_X86);

  unsigned LastVal = ~0U;
  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
1563 assert(VA.getValNo() != LastVal && 1564 "Don't support value assigned to multiple locs yet"); 1565 LastVal = VA.getValNo(); 1566 1567 if (VA.isRegLoc()) { 1568 EVT RegVT = VA.getLocVT(); 1569 TargetRegisterClass *RC = NULL; 1570 if (RegVT == MVT::i32) 1571 RC = X86::GR32RegisterClass; 1572 else if (Is64Bit && RegVT == MVT::i64) 1573 RC = X86::GR64RegisterClass; 1574 else if (RegVT == MVT::f32) 1575 RC = X86::FR32RegisterClass; 1576 else if (RegVT == MVT::f64) 1577 RC = X86::FR64RegisterClass; 1578 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1579 RC = X86::VR256RegisterClass; 1580 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1581 RC = X86::VR128RegisterClass; 1582 else if (RegVT == MVT::x86mmx) 1583 RC = X86::VR64RegisterClass; 1584 else 1585 llvm_unreachable("Unknown argument type!"); 1586 1587 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1588 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1589 1590 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1591 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1592 // right size. 1593 if (VA.getLocInfo() == CCValAssign::SExt) 1594 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1595 DAG.getValueType(VA.getValVT())); 1596 else if (VA.getLocInfo() == CCValAssign::ZExt) 1597 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1598 DAG.getValueType(VA.getValVT())); 1599 else if (VA.getLocInfo() == CCValAssign::BCvt) 1600 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); 1601 1602 if (VA.isExtInLoc()) { 1603 // Handle MMX values passed in XMM regs. 
1604 if (RegVT.isVector()) { 1605 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1606 ArgValue); 1607 } else 1608 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1609 } 1610 } else { 1611 assert(VA.isMemLoc()); 1612 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1613 } 1614 1615 // If value is passed via pointer - do a load. 1616 if (VA.getLocInfo() == CCValAssign::Indirect) 1617 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1618 MachinePointerInfo(), false, false, 0); 1619 1620 InVals.push_back(ArgValue); 1621 } 1622 1623 // The x86-64 ABI for returning structs by value requires that we copy 1624 // the sret argument into %rax for the return. Save the argument into 1625 // a virtual register so that we can access it from the return points. 1626 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1627 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1628 unsigned Reg = FuncInfo->getSRetReturnReg(); 1629 if (!Reg) { 1630 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1631 FuncInfo->setSRetReturnReg(Reg); 1632 } 1633 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1634 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1635 } 1636 1637 unsigned StackSize = CCInfo.getNextStackOffset(); 1638 // Align stack specially for tail calls. 1639 if (FuncIsMadeTailCallSafe(CallConv)) 1640 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1641 1642 // If the function takes variable number of arguments, make a frame index for 1643 // the start of the first vararg value... for expansion of llvm.va_start. 
1644 if (isVarArg) { 1645 if (!IsWin64 && (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1646 CallConv != CallingConv::X86_ThisCall))) { 1647 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1648 } 1649 if (Is64Bit) { 1650 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1651 1652 // FIXME: We should really autogenerate these arrays 1653 static const unsigned GPR64ArgRegsWin64[] = { 1654 X86::RCX, X86::RDX, X86::R8, X86::R9 1655 }; 1656 static const unsigned GPR64ArgRegs64Bit[] = { 1657 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1658 }; 1659 static const unsigned XMMArgRegs64Bit[] = { 1660 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1661 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1662 }; 1663 const unsigned *GPR64ArgRegs; 1664 unsigned NumXMMRegs = 0; 1665 1666 if (IsWin64) { 1667 // The XMM registers which might contain var arg parameters are shadowed 1668 // in their paired GPR. So we only need to save the GPR to their home 1669 // slots. 1670 TotalNumIntRegs = 4; 1671 GPR64ArgRegs = GPR64ArgRegsWin64; 1672 } else { 1673 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1674 GPR64ArgRegs = GPR64ArgRegs64Bit; 1675 1676 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs); 1677 } 1678 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 1679 TotalNumIntRegs); 1680 1681 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 1682 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 1683 "SSE register cannot be used when SSE is disabled!"); 1684 assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) && 1685 "SSE register cannot be used when SSE is disabled!"); 1686 if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1()) 1687 // Kernel mode asks for SSE to be disabled, so don't push them 1688 // on the stack. 
1689 TotalNumXMMRegs = 0; 1690 1691 if (IsWin64) { 1692 const TargetFrameInfo &TFI = *getTargetMachine().getFrameInfo(); 1693 // Get to the caller-allocated home save location. Add 8 to account 1694 // for the return address. 1695 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 1696 FuncInfo->setRegSaveFrameIndex( 1697 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 1698 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 1699 } else { 1700 // For X86-64, if there are vararg parameters that are passed via 1701 // registers, then we must store them to their spots on the stack so they 1702 // may be loaded by deferencing the result of va_next. 1703 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 1704 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 1705 FuncInfo->setRegSaveFrameIndex( 1706 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 1707 false)); 1708 } 1709 1710 // Store the integer parameter registers. 1711 SmallVector<SDValue, 8> MemOps; 1712 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 1713 getPointerTy()); 1714 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 1715 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 1716 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 1717 DAG.getIntPtrConstant(Offset)); 1718 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 1719 X86::GR64RegisterClass); 1720 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1721 SDValue Store = 1722 DAG.getStore(Val.getValue(1), dl, Val, FIN, 1723 MachinePointerInfo::getFixedStack( 1724 FuncInfo->getRegSaveFrameIndex(), Offset), 1725 false, false, 0); 1726 MemOps.push_back(Store); 1727 Offset += 8; 1728 } 1729 1730 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 1731 // Now store the XMM (fp + vector) parameter registers. 
1732 SmallVector<SDValue, 11> SaveXMMOps; 1733 SaveXMMOps.push_back(Chain); 1734 1735 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 1736 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 1737 SaveXMMOps.push_back(ALVal); 1738 1739 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1740 FuncInfo->getRegSaveFrameIndex())); 1741 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1742 FuncInfo->getVarArgsFPOffset())); 1743 1744 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 1745 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 1746 X86::VR128RegisterClass); 1747 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 1748 SaveXMMOps.push_back(Val); 1749 } 1750 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 1751 MVT::Other, 1752 &SaveXMMOps[0], SaveXMMOps.size())); 1753 } 1754 1755 if (!MemOps.empty()) 1756 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1757 &MemOps[0], MemOps.size()); 1758 } 1759 } 1760 1761 // Some CCs need callee pop. 1762 if (Subtarget->IsCalleePop(isVarArg, CallConv)) { 1763 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 1764 } else { 1765 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 1766 // If this is an sret function, the return should pop the hidden pointer. 1767 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins)) 1768 FuncInfo->setBytesToPopOnReturn(4); 1769 } 1770 1771 if (!Is64Bit) { 1772 // RegSaveFrameIndex is X86-64 only. 1773 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 1774 if (CallConv == CallingConv::X86_FastCall || 1775 CallConv == CallingConv::X86_ThisCall) 1776 // fastcc functions can't have varargs. 
1777 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 1778 } 1779 1780 return Chain; 1781} 1782 1783SDValue 1784X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 1785 SDValue StackPtr, SDValue Arg, 1786 DebugLoc dl, SelectionDAG &DAG, 1787 const CCValAssign &VA, 1788 ISD::ArgFlagsTy Flags) const { 1789 const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0); 1790 unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset(); 1791 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1792 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1793 if (Flags.isByVal()) 1794 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 1795 1796 return DAG.getStore(Chain, dl, Arg, PtrOff, 1797 MachinePointerInfo::getStack(LocMemOffset), 1798 false, false, 0); 1799} 1800 1801/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 1802/// optimization is performed and it is required. 1803SDValue 1804X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 1805 SDValue &OutRetAddr, SDValue Chain, 1806 bool IsTailCall, bool Is64Bit, 1807 int FPDiff, DebugLoc dl) const { 1808 // Adjust the Return address stack slot. 1809 EVT VT = getPointerTy(); 1810 OutRetAddr = getReturnAddressFrameIndex(DAG); 1811 1812 // Load the "old" Return address. 1813 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 1814 false, false, 0); 1815 return SDValue(OutRetAddr.getNode(), 1); 1816} 1817 1818/// EmitTailCallStoreRetAddr - Emit a store of the return adress if tail call 1819/// optimization is performed and it is required (FPDiff!=0). 1820static SDValue 1821EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 1822 SDValue Chain, SDValue RetAddrFrIdx, 1823 bool Is64Bit, int FPDiff, DebugLoc dl) { 1824 // Store the return address to the appropriate stack slot. 1825 if (!FPDiff) return Chain; 1826 // Calculate the new stack slot for the return address. 1827 int SlotSize = Is64Bit ? 
8 : 4; 1828 int NewReturnAddrFI = 1829 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 1830 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 1831 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1832 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 1833 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 1834 false, false, 0); 1835 return Chain; 1836} 1837 1838SDValue 1839X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1840 CallingConv::ID CallConv, bool isVarArg, 1841 bool &isTailCall, 1842 const SmallVectorImpl<ISD::OutputArg> &Outs, 1843 const SmallVectorImpl<SDValue> &OutVals, 1844 const SmallVectorImpl<ISD::InputArg> &Ins, 1845 DebugLoc dl, SelectionDAG &DAG, 1846 SmallVectorImpl<SDValue> &InVals) const { 1847 MachineFunction &MF = DAG.getMachineFunction(); 1848 bool Is64Bit = Subtarget->is64Bit(); 1849 bool IsStructRet = CallIsStructReturn(Outs); 1850 bool IsSibcall = false; 1851 1852 if (isTailCall) { 1853 // Check if it's really possible to do a tail call. 1854 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1855 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1856 Outs, OutVals, Ins, DAG); 1857 1858 // Sibcalls are automatically detected tailcalls which do not require 1859 // ABI changes. 1860 if (!GuaranteedTailCallOpt && isTailCall) 1861 IsSibcall = true; 1862 1863 if (isTailCall) 1864 ++NumTailCalls; 1865 } 1866 1867 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1868 "Var args not supported with calling convention fastcc or ghc"); 1869 1870 // Analyze operands of the call, assigning locations to each operand. 1871 SmallVector<CCValAssign, 16> ArgLocs; 1872 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 1873 ArgLocs, *DAG.getContext()); 1874 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 1875 1876 // Get a count of how many bytes are to be pushed on the stack. 
1877 unsigned NumBytes = CCInfo.getNextStackOffset(); 1878 if (IsSibcall) 1879 // This is a sibcall. The memory operands are available in caller's 1880 // own caller's stack. 1881 NumBytes = 0; 1882 else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv)) 1883 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1884 1885 int FPDiff = 0; 1886 if (isTailCall && !IsSibcall) { 1887 // Lower arguments at fp - stackoffset + fpdiff. 1888 unsigned NumBytesCallerPushed = 1889 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1890 FPDiff = NumBytesCallerPushed - NumBytes; 1891 1892 // Set the delta of movement of the returnaddr stackslot. 1893 // But only set if delta is greater than previous delta. 1894 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1895 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1896 } 1897 1898 if (!IsSibcall) 1899 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1900 1901 SDValue RetAddrFrIdx; 1902 // Load return adress for tail calls. 1903 if (isTailCall && FPDiff) 1904 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 1905 Is64Bit, FPDiff, dl); 1906 1907 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 1908 SmallVector<SDValue, 8> MemOpChains; 1909 SDValue StackPtr; 1910 1911 // Walk the register/memloc assignments, inserting copies/loads. In the case 1912 // of tail call optimization arguments are handle later. 1913 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1914 CCValAssign &VA = ArgLocs[i]; 1915 EVT RegVT = VA.getLocVT(); 1916 SDValue Arg = OutVals[i]; 1917 ISD::ArgFlagsTy Flags = Outs[i].Flags; 1918 bool isByVal = Flags.isByVal(); 1919 1920 // Promote the value if needed. 
1921 switch (VA.getLocInfo()) { 1922 default: llvm_unreachable("Unknown loc info!"); 1923 case CCValAssign::Full: break; 1924 case CCValAssign::SExt: 1925 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 1926 break; 1927 case CCValAssign::ZExt: 1928 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 1929 break; 1930 case CCValAssign::AExt: 1931 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { 1932 // Special case: passing MMX values in XMM registers. 1933 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); 1934 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 1935 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 1936 } else 1937 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 1938 break; 1939 case CCValAssign::BCvt: 1940 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg); 1941 break; 1942 case CCValAssign::Indirect: { 1943 // Store the argument. 1944 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 1945 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 1946 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 1947 MachinePointerInfo::getFixedStack(FI), 1948 false, false, 0); 1949 Arg = SpillSlot; 1950 break; 1951 } 1952 } 1953 1954 if (VA.isRegLoc()) { 1955 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1956 if (isVarArg && Subtarget->isTargetWin64()) { 1957 // Win64 ABI requires argument XMM reg to be copied to the corresponding 1958 // shadow reg if callee is a varargs function. 
1959 unsigned ShadowReg = 0; 1960 switch (VA.getLocReg()) { 1961 case X86::XMM0: ShadowReg = X86::RCX; break; 1962 case X86::XMM1: ShadowReg = X86::RDX; break; 1963 case X86::XMM2: ShadowReg = X86::R8; break; 1964 case X86::XMM3: ShadowReg = X86::R9; break; 1965 } 1966 if (ShadowReg) 1967 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 1968 } 1969 } else if (!IsSibcall && (!isTailCall || isByVal)) { 1970 assert(VA.isMemLoc()); 1971 if (StackPtr.getNode() == 0) 1972 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 1973 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1974 dl, DAG, VA, Flags)); 1975 } 1976 } 1977 1978 if (!MemOpChains.empty()) 1979 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1980 &MemOpChains[0], MemOpChains.size()); 1981 1982 // Build a sequence of copy-to-reg nodes chained together with token chain 1983 // and flag operands which copy the outgoing args into registers. 1984 SDValue InFlag; 1985 // Tail call byval lowering might overwrite argument registers so in case of 1986 // tail call optimization the copies to registers are lowered later. 1987 if (!isTailCall) 1988 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1989 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1990 RegsToPass[i].second, InFlag); 1991 InFlag = Chain.getValue(1); 1992 } 1993 1994 if (Subtarget->isPICStyleGOT()) { 1995 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1996 // GOT pointer. 1997 if (!isTailCall) { 1998 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, 1999 DAG.getNode(X86ISD::GlobalBaseReg, 2000 DebugLoc(), getPointerTy()), 2001 InFlag); 2002 InFlag = Chain.getValue(1); 2003 } else { 2004 // If we are tail calling and generating PIC/GOT style code load the 2005 // address of the callee into ECX. The value in ecx is used as target of 2006 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2007 // for tail calls on PIC/GOT architectures. 
Normally we would just put the 2008 // address of GOT into ebx and then call target@PLT. But for tail calls 2009 // ebx would be restored (since ebx is callee saved) before jumping to the 2010 // target@PLT. 2011 2012 // Note: The actual moving to ECX is done further down. 2013 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2014 if (G && !G->getGlobal()->hasHiddenVisibility() && 2015 !G->getGlobal()->hasProtectedVisibility()) 2016 Callee = LowerGlobalAddress(Callee, DAG); 2017 else if (isa<ExternalSymbolSDNode>(Callee)) 2018 Callee = LowerExternalSymbol(Callee, DAG); 2019 } 2020 } 2021 2022 if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) { 2023 // From AMD64 ABI document: 2024 // For calls that may call functions that use varargs or stdargs 2025 // (prototype-less calls or calls to functions containing ellipsis (...) in 2026 // the declaration) %al is used as hidden argument to specify the number 2027 // of SSE registers used. The contents of %al do not need to match exactly 2028 // the number of registers, but must be an ubound on the number of SSE 2029 // registers used and is in the range 0 - 8 inclusive. 2030 2031 // Count the number of XMM registers allocated. 2032 static const unsigned XMMArgRegs[] = { 2033 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2034 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2035 }; 2036 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2037 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2038 && "SSE registers cannot be used when SSE is disabled"); 2039 2040 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, 2041 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 2042 InFlag = Chain.getValue(1); 2043 } 2044 2045 2046 // For tail calls lower the arguments to the 'real' stack slot. 
2047 if (isTailCall) { 2048 // Force all the incoming stack arguments to be loaded from the stack 2049 // before any new outgoing arguments are stored to the stack, because the 2050 // outgoing stack slots may alias the incoming argument stack slots, and 2051 // the alias isn't otherwise explicit. This is slightly more conservative 2052 // than necessary, because it means that each store effectively depends 2053 // on every argument instead of just those arguments it would clobber. 2054 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2055 2056 SmallVector<SDValue, 8> MemOpChains2; 2057 SDValue FIN; 2058 int FI = 0; 2059 // Do not flag preceeding copytoreg stuff together with the following stuff. 2060 InFlag = SDValue(); 2061 if (GuaranteedTailCallOpt) { 2062 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2063 CCValAssign &VA = ArgLocs[i]; 2064 if (VA.isRegLoc()) 2065 continue; 2066 assert(VA.isMemLoc()); 2067 SDValue Arg = OutVals[i]; 2068 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2069 // Create frame index. 2070 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2071 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2072 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2073 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2074 2075 if (Flags.isByVal()) { 2076 // Copy relative to framepointer. 2077 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2078 if (StackPtr.getNode() == 0) 2079 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 2080 getPointerTy()); 2081 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2082 2083 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2084 ArgChain, 2085 Flags, DAG, dl)); 2086 } else { 2087 // Store relative to framepointer. 
2088 MemOpChains2.push_back( 2089 DAG.getStore(ArgChain, dl, Arg, FIN, 2090 MachinePointerInfo::getFixedStack(FI), 2091 false, false, 0)); 2092 } 2093 } 2094 } 2095 2096 if (!MemOpChains2.empty()) 2097 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2098 &MemOpChains2[0], MemOpChains2.size()); 2099 2100 // Copy arguments to their registers. 2101 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2102 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2103 RegsToPass[i].second, InFlag); 2104 InFlag = Chain.getValue(1); 2105 } 2106 InFlag =SDValue(); 2107 2108 // Store the return address to the appropriate stack slot. 2109 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, 2110 FPDiff, dl); 2111 } 2112 2113 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2114 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2115 // In the 64-bit large code model, we have to make all calls 2116 // through a register, since the call instruction's 32-bit 2117 // pc-relative offset may not be large enough to hold the whole 2118 // address. 2119 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2120 // If the callee is a GlobalAddress node (quite common, every direct call 2121 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2122 // it. 2123 2124 // We should use extra load for direct calls to dllimported functions in 2125 // non-JIT mode. 2126 const GlobalValue *GV = G->getGlobal(); 2127 if (!GV->hasDLLImportLinkage()) { 2128 unsigned char OpFlags = 0; 2129 2130 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2131 // external symbols most go through the PLT in PIC mode. If the symbol 2132 // has hidden or protected visibility, or if it is static or local, then 2133 // we don't need to use the PLT - we can directly call it. 
2134 if (Subtarget->isTargetELF() && 2135 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2136 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2137 OpFlags = X86II::MO_PLT; 2138 } else if (Subtarget->isPICStyleStubAny() && 2139 (GV->isDeclaration() || GV->isWeakForLinker()) && 2140 Subtarget->getDarwinVers() < 9) { 2141 // PC-relative references to external symbols should go through $stub, 2142 // unless we're building with the leopard linker or later, which 2143 // automatically synthesizes these stubs. 2144 OpFlags = X86II::MO_DARWIN_STUB; 2145 } 2146 2147 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2148 G->getOffset(), OpFlags); 2149 } 2150 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2151 unsigned char OpFlags = 0; 2152 2153 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to external 2154 // symbols should go through the PLT. 2155 if (Subtarget->isTargetELF() && 2156 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2157 OpFlags = X86II::MO_PLT; 2158 } else if (Subtarget->isPICStyleStubAny() && 2159 Subtarget->getDarwinVers() < 9) { 2160 // PC-relative references to external symbols should go through $stub, 2161 // unless we're building with the leopard linker or later, which 2162 // automatically synthesizes these stubs. 2163 OpFlags = X86II::MO_DARWIN_STUB; 2164 } 2165 2166 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2167 OpFlags); 2168 } 2169 2170 // Returns a chain & a flag for retval copy to use. 
2171 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 2172 SmallVector<SDValue, 8> Ops; 2173 2174 if (!IsSibcall && isTailCall) { 2175 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2176 DAG.getIntPtrConstant(0, true), InFlag); 2177 InFlag = Chain.getValue(1); 2178 } 2179 2180 Ops.push_back(Chain); 2181 Ops.push_back(Callee); 2182 2183 if (isTailCall) 2184 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2185 2186 // Add argument registers to the end of the list so that they are known live 2187 // into the call. 2188 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2189 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2190 RegsToPass[i].second.getValueType())); 2191 2192 // Add an implicit use GOT pointer in EBX. 2193 if (!isTailCall && Subtarget->isPICStyleGOT()) 2194 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2195 2196 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. 2197 if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) 2198 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); 2199 2200 if (InFlag.getNode()) 2201 Ops.push_back(InFlag); 2202 2203 if (isTailCall) { 2204 // We used to do: 2205 //// If this is the first return lowered for this function, add the regs 2206 //// to the liveout set for the function. 2207 // This isn't right, although it's probably harmless on x86; liveouts 2208 // should be computed from returns not tail calls. Consider a void 2209 // function making a tail call to a function returning int. 2210 return DAG.getNode(X86ISD::TC_RETURN, dl, 2211 NodeTys, &Ops[0], Ops.size()); 2212 } 2213 2214 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2215 InFlag = Chain.getValue(1); 2216 2217 // Create the CALLSEQ_END node. 
2218 unsigned NumBytesForCalleeToPush; 2219 if (Subtarget->IsCalleePop(isVarArg, CallConv)) 2220 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2221 else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet) 2222 // If this is a call to a struct-return function, the callee 2223 // pops the hidden struct pointer, so we have to push it back. 2224 // This is common for Darwin/X86, Linux & Mingw32 targets. 2225 NumBytesForCalleeToPush = 4; 2226 else 2227 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2228 2229 // Returns a flag for retval copy to use. 2230 if (!IsSibcall) { 2231 Chain = DAG.getCALLSEQ_END(Chain, 2232 DAG.getIntPtrConstant(NumBytes, true), 2233 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2234 true), 2235 InFlag); 2236 InFlag = Chain.getValue(1); 2237 } 2238 2239 // Handle result values, copying them out of physregs into vregs that we 2240 // return. 2241 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2242 Ins, dl, DAG, InVals); 2243} 2244 2245 2246//===----------------------------------------------------------------------===// 2247// Fast Calling Convention (tail call) implementation 2248//===----------------------------------------------------------------------===// 2249 2250// Like std call, callee cleans arguments, convention except that ECX is 2251// reserved for storing the tail called function address. Only 2 registers are 2252// free for argument passing (inreg). Tail call optimization is performed 2253// provided: 2254// * tailcallopt is enabled 2255// * caller/callee are fastcc 2256// On X86_64 architecture with GOT-style position independent code only local 2257// (within module) calls are supported at the moment. 2258// To keep the stack aligned according to platform abi the function 2259// GetAlignedArgumentStackSize ensures that argument delta is always multiples 2260// of stack alignment. 
(Dynamic linkers need this - darwin's dyld for example) 2261// If a tail called function callee has more arguments than the caller the 2262// caller needs to make sure that there is room to move the RETADDR to. This is 2263// achieved by reserving an area the size of the argument delta right after the 2264// original REtADDR, but before the saved framepointer or the spilled registers 2265// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4) 2266// stack layout: 2267// arg1 2268// arg2 2269// RETADDR 2270// [ new RETADDR 2271// move area ] 2272// (possible EBP) 2273// ESI 2274// EDI 2275// local1 .. 2276 2277/// GetAlignedArgumentStackSize - Make the stack size align e.g 16n + 12 aligned 2278/// for a 16 byte align requirement. 2279unsigned 2280X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2281 SelectionDAG& DAG) const { 2282 MachineFunction &MF = DAG.getMachineFunction(); 2283 const TargetMachine &TM = MF.getTarget(); 2284 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 2285 unsigned StackAlignment = TFI.getStackAlignment(); 2286 uint64_t AlignMask = StackAlignment - 1; 2287 int64_t Offset = StackSize; 2288 uint64_t SlotSize = TD->getPointerSize(); 2289 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2290 // Number smaller than 12 so just add the difference. 2291 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2292 } else { 2293 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2294 Offset = ((~AlignMask) & Offset) + StackAlignment + 2295 (StackAlignment-SlotSize); 2296 } 2297 return Offset; 2298} 2299 2300/// MatchingStackOffset - Return true if the given stack call argument is 2301/// already available in the same position (relatively) of the caller's 2302/// incoming argument stack. 
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const X86InstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  // FI is the frame index the argument ultimately traces back to; INT_MAX
  // acts as a "not found" sentinel checked by the assert below.
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    // The argument was first copied into a virtual register; chase the
    // defining instruction to see if it reads a fixed stack slot.
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      // Plain argument: the def must be a reload from a stack slot.
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      // ByVal argument: accept only an LEA of a frame index (the address of
      // the incoming byval area), and compare the full byval size.
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  // Only fixed objects correspond to the caller's incoming argument area.
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  // Only C and the designated tail-call conventions can be tail-called.
  if (!IsTailCallConvention(CalleeCC) &&
      CalleeCC != CallingConv::C)
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  if (GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  if (RegInfo->needsStackRealignment(MF))
    return false;

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // If the call result is in ST0 / ST1, it needs to be popped off the x87 stack.
  // Therefore if it's not used by the call it is not safe to optimize this into
  // a sibcall.
  bool Unused = false;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (!Ins[i].Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    // Some result is ignored at the call site: reject if any result would
    // land in an x87 stack register.
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CalleeCC, false, getTargetMachine(),
                   RVLocs, *DAG.getContext());
    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
        return false;
    }
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, getTargetMachine(),
                    RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, getTargetMachine(),
                    RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

    // Every result location (register or memory offset) must agree between
    // the two conventions.
    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
                   ArgLocs, *DAG.getContext());
    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();
      if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
        return false;
      if (Subtarget->isTargetWin64())
        // Win64 ABI has additional complications.
        return false;

      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII =
        ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. In 32-bit, the call address can
    // only target EAX, EDX, or ECX since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if (!Subtarget->is64Bit() &&
        !isa<GlobalAddressSDNode>(Callee) &&
        !isa<ExternalSymbolSDNode>(Callee)) {
      unsigned NumInRegs = 0;
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        unsigned Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          // All three candidate registers consumed by inreg arguments:
          // nothing left for the call target.
          if (++NumInRegs == 3)
            return false;
          break;
        }
      }
    }
  }

  // An stdcall caller is expected to clean up its arguments; the callee
  // isn't going to do that.
  if (!CCMatch && CallerCC==CallingConv::X86_StdCall)
    return false;

  return true;
}

/// createFastISel - Create an X86-specific FastISel instance for the given
/// function lowering state.
FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return X86::createFastISel(funcInfo);
}


//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

/// MayFoldLoad - Return true if Op is a normal load with a single use, i.e.
/// a candidate for folding into another instruction's memory operand.
static bool MayFoldLoad(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}

/// MayFoldIntoStore - Return true if Op has a single use and that use is a
/// normal store.
static bool MayFoldIntoStore(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}

/// isTargetShuffle - Return true if Opcode is one of the X86-specific
/// shuffle node opcodes.
static bool isTargetShuffle(unsigned Opcode) {
  switch(Opcode) {
  default: return false;
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::SHUFPD:
  case X86ISD::PALIGN:
  case X86ISD::SHUFPS:
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
    return true;
  }
  return false;
}

/// getTargetShuffleNode - Build a one-operand target shuffle node (dup
/// variants take no immediate).
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
    return DAG.getNode(Opc, dl, VT, V1);
  }

  return SDValue();
}

/// getTargetShuffleNode - Build a one-operand target shuffle node with an
/// 8-bit shuffle immediate (PSHUF* variants).
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
          SDValue V1, unsigned TargetMask, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
    return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
  }

  return SDValue();
}

/// getTargetShuffleNode - Build a two-operand target shuffle node with an
/// 8-bit shuffle immediate (PALIGN/SHUFP variants).
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
          SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PALIGN:
  case X86ISD::SHUFPD:
  case X86ISD::SHUFPS:
    return DAG.getNode(Opc, dl, VT, V1, V2,
                       DAG.getConstant(TargetMask, MVT::i8));
  }
  return SDValue();
}

/// getTargetShuffleNode - Build a two-operand target shuffle node with no
/// immediate (MOV*/UNPCK* variants).
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
    return DAG.getNode(Opc, dl, VT, V1, V2);
  }
  return SDValue();
}

/// getReturnAddressFrameIndex - Return (creating on first use) the fixed
/// frame index that addresses the return-address slot.
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    uint64_t SlotSize = TD->getPointerSize();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
                                                           false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement) {
  // Offset should fit into 32 bit immediate field.
  if (!isInt<32>(Offset))
    return false;

  // If we don't have a symbolic displacement - we don't have any extra
  // restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // FIXME: Some tweaks might be needed for medium code model.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // For small code model we assume that latest object is 16MB before end of 31
  // bits boundary. We may also accept pretty large negative constants knowing
  // that all objects are in the positive half of address space.
  if (M == CodeModel::Small && Offset < 16*1024*1024)
    return true;

  // For kernel code model we know that all objects reside in the negative half
  // of 32bits address space. We may not accept negative offsets, since they may
  // be just off and we may accept pretty large positive ones.
  if (M == CodeModel::Kernel && Offset > 0)
    return true;

  return false;
}

/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
/// specific condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                               SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_NS;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        return X86::COND_S;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_LE;
      }
    }

    switch (SetCCOpcode) {
    default: llvm_unreachable("Invalid integer condition!");
    case ISD::SETEQ:  return X86::COND_E;
    case ISD::SETGT:  return X86::COND_G;
    case ISD::SETGE:  return X86::COND_GE;
    case ISD::SETLT:  return X86::COND_L;
    case ISD::SETLE:  return X86::COND_LE;
    case ISD::SETNE:  return X86::COND_NE;
    case ISD::SETULT: return X86::COND_B;
    case ISD::SETUGT: return X86::COND_A;
    case ISD::SETULE: return X86::COND_BE;
    case ISD::SETUGE: return X86::COND_AE;
    }
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
      !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:   return X86::COND_E;
  case ISD::SETOLT:              // flipped
  case ISD::SETOGT:
  case ISD::SETGT:   return X86::COND_A;
  case ISD::SETOLE:              // flipped
  case ISD::SETOGE:
  case ISD::SETGE:   return X86::COND_AE;
  case ISD::SETUGT:              // flipped
  case ISD::SETULT:
  case ISD::SETLT:   return X86::COND_B;
  case ISD::SETUGE:              // flipped
  case ISD::SETULE:
  case ISD::SETLE:   return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:   return X86::COND_NE;
  case ISD::SETUO:   return X86::COND_P;
  case ISD::SETO:    return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE:  return X86::COND_INVALID;
  }
}

/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code. Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // LegalFPImmediates is populated elsewhere with the immediates the
  // subtarget can materialize directly.
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}

/// isUndefOrInRange - Return true if Val is undef (negative) or if its value
/// falls within the half-open range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  if (Val < 0 || Val == CmpVal)
    return true;
  return false;
}

/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  // Any index < NumElems (or undef, which is negative) stays within the
  // first operand.
  if (VT == MVT::v4f32 || VT == MVT::v4i32 )
    return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return (Mask[0] < 2 && Mask[1] < 2);
  return false;
}

bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFDMask(M, N->getValueType(0));
}

/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Lower quadword copied in order or undef.
  for (int i = 0; i != 4; ++i)
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;

  // Upper quadword shuffled.
  for (int i = 4; i != 8; ++i)
    if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
      return false;

  return true;
}

bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFHWMask(M, N->getValueType(0));
}

/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Upper quadword copied in order.
  for (int i = 4; i != 8; ++i)
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;

  // Lower quadword shuffled.
  for (int i = 0; i != 4; ++i)
    if (Mask[i] >= 4)
      return false;

  return true;
}

bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFLWMask(M, N->getValueType(0));
}

/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PALIGNR.
static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool hasSSSE3) {
  int i, e = VT.getVectorNumElements();

  // Do not handle v2i64 / v2f64 shuffles with palignr.
  if (e < 4 || !hasSSSE3)
    return false;

  // Find the first defined element.
  for (i = 0; i != e; ++i)
    if (Mask[i] >= 0)
      break;

  // All undef, not a palignr.
  if (i == e)
    return false;

  // Determine if it's ok to perform a palignr with only the LHS, since we
  // don't have access to the actual shuffle elements to see if RHS is undef.
  bool Unary = Mask[i] < (int)e;
  bool NeedsUnary = false;

  // s is the rotation amount implied by the first defined element.
  int s = Mask[i] - i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != e; ++i) {
    int m = Mask[i];
    if (m < 0)
      continue;

    Unary = Unary && (m < (int)e);
    NeedsUnary = NeedsUnary || (m < s);

    if (NeedsUnary && !Unary)
      return false;
    // In the unary case indices wrap around within the single source
    // (e is a power of two, so & (e-1) is the modulo).
    if (Unary && m != ((s+i) & (e-1)))
      return false;
    if (!Unary && m != (s+i))
      return false;
  }
  return true;
}

bool X86::isPALIGNRMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPALIGNRMask(M, N->getValueType(0), true);
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
2936static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2937 int NumElems = VT.getVectorNumElements(); 2938 if (NumElems != 2 && NumElems != 4) 2939 return false; 2940 2941 int Half = NumElems / 2; 2942 for (int i = 0; i < Half; ++i) 2943 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 2944 return false; 2945 for (int i = Half; i < NumElems; ++i) 2946 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 2947 return false; 2948 2949 return true; 2950} 2951 2952bool X86::isSHUFPMask(ShuffleVectorSDNode *N) { 2953 SmallVector<int, 8> M; 2954 N->getMask(M); 2955 return ::isSHUFPMask(M, N->getValueType(0)); 2956} 2957 2958/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2959/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2960/// half elements to come from vector 1 (which would equal the dest.) and 2961/// the upper half to come from vector 2. 2962static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2963 int NumElems = VT.getVectorNumElements(); 2964 2965 if (NumElems != 2 && NumElems != 4) 2966 return false; 2967 2968 int Half = NumElems / 2; 2969 for (int i = 0; i < Half; ++i) 2970 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 2971 return false; 2972 for (int i = Half; i < NumElems; ++i) 2973 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 2974 return false; 2975 return true; 2976} 2977 2978static bool isCommutedSHUFP(ShuffleVectorSDNode *N) { 2979 SmallVector<int, 8> M; 2980 N->getMask(M); 2981 return isCommutedSHUFPMask(M, N->getValueType(0)); 2982} 2983 2984/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2985/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 
2986bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) { 2987 if (N->getValueType(0).getVectorNumElements() != 4) 2988 return false; 2989 2990 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2991 return isUndefOrEqual(N->getMaskElt(0), 6) && 2992 isUndefOrEqual(N->getMaskElt(1), 7) && 2993 isUndefOrEqual(N->getMaskElt(2), 2) && 2994 isUndefOrEqual(N->getMaskElt(3), 3); 2995} 2996 2997/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2998/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2999/// <2, 3, 2, 3> 3000bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) { 3001 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3002 3003 if (NumElems != 4) 3004 return false; 3005 3006 return isUndefOrEqual(N->getMaskElt(0), 2) && 3007 isUndefOrEqual(N->getMaskElt(1), 3) && 3008 isUndefOrEqual(N->getMaskElt(2), 2) && 3009 isUndefOrEqual(N->getMaskElt(3), 3); 3010} 3011 3012/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3013/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3014bool X86::isMOVLPMask(ShuffleVectorSDNode *N) { 3015 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3016 3017 if (NumElems != 2 && NumElems != 4) 3018 return false; 3019 3020 for (unsigned i = 0; i < NumElems/2; ++i) 3021 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems)) 3022 return false; 3023 3024 for (unsigned i = NumElems/2; i < NumElems; ++i) 3025 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3026 return false; 3027 3028 return true; 3029} 3030 3031/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3032/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 
bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) {
  unsigned NumElems = N->getValueType(0).getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;

  // Low result half: V1's low half, in place.
  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i))
      return false;

  // High result half: V2's low half.
  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems))
      return false;

  return true;
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                         bool V2IsSplat = false) {
  int NumElts = VT.getVectorNumElements();
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  // UNPCKL interleaves the low halves: even result elements come from V1's
  // low half, odd result elements from V2's low half.
  for (int i = 0, j = 0; i != NumElts; i += 2, ++j) {
    int BitI  = Mask[i];
    int BitI1 = Mask[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      // A splatted V2 only ever contributes its element 0, referenced as
      // index NumElts in the combined shuffle mask.
      if (!isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts))
        return false;
    }
  }
  return true;
}

bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
3082static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT, 3083 bool V2IsSplat = false) { 3084 int NumElts = VT.getVectorNumElements(); 3085 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 3086 return false; 3087 3088 for (int i = 0, j = 0; i != NumElts; i += 2, ++j) { 3089 int BitI = Mask[i]; 3090 int BitI1 = Mask[i+1]; 3091 if (!isUndefOrEqual(BitI, j + NumElts/2)) 3092 return false; 3093 if (V2IsSplat) { 3094 if (isUndefOrEqual(BitI1, NumElts)) 3095 return false; 3096 } else { 3097 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 3098 return false; 3099 } 3100 } 3101 return true; 3102} 3103 3104bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) { 3105 SmallVector<int, 8> M; 3106 N->getMask(M); 3107 return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat); 3108} 3109 3110/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3111/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3112/// <0, 0, 1, 1> 3113static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) { 3114 int NumElems = VT.getVectorNumElements(); 3115 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 3116 return false; 3117 3118 for (int i = 0, j = 0; i != NumElems; i += 2, ++j) { 3119 int BitI = Mask[i]; 3120 int BitI1 = Mask[i+1]; 3121 if (!isUndefOrEqual(BitI, j)) 3122 return false; 3123 if (!isUndefOrEqual(BitI1, j)) 3124 return false; 3125 } 3126 return true; 3127} 3128 3129bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) { 3130 SmallVector<int, 8> M; 3131 N->getMask(M); 3132 return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0)); 3133} 3134 3135/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3136/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef, 3137/// <2, 2, 3, 3> 3138static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) { 3139 int NumElems = VT.getVectorNumElements(); 3140 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 3141 return false; 3142 3143 for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 3144 int BitI = Mask[i]; 3145 int BitI1 = Mask[i+1]; 3146 if (!isUndefOrEqual(BitI, j)) 3147 return false; 3148 if (!isUndefOrEqual(BitI1, j)) 3149 return false; 3150 } 3151 return true; 3152} 3153 3154bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) { 3155 SmallVector<int, 8> M; 3156 N->getMask(M); 3157 return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0)); 3158} 3159 3160/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3161/// specifies a shuffle of elements that is suitable for input to MOVSS, 3162/// MOVSD, and MOVD, i.e. setting the lowest element. 3163static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3164 if (VT.getVectorElementType().getSizeInBits() < 32) 3165 return false; 3166 3167 int NumElts = VT.getVectorNumElements(); 3168 3169 if (!isUndefOrEqual(Mask[0], NumElts)) 3170 return false; 3171 3172 for (int i = 1; i < NumElts; ++i) 3173 if (!isUndefOrEqual(Mask[i], i)) 3174 return false; 3175 3176 return true; 3177} 3178 3179bool X86::isMOVLMask(ShuffleVectorSDNode *N) { 3180 SmallVector<int, 8> M; 3181 N->getMask(M); 3182 return ::isMOVLMask(M, N->getValueType(0)); 3183} 3184 3185/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 3186/// of what x86 movss want. X86 movs requires the lowest element to be lowest 3187/// element of vector 2 and the other elements to come from vector 1 in order. 
static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                               bool V2IsSplat = false, bool V2IsUndef = false) {
  int NumOps = VT.getVectorNumElements();
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  // Element 0 must come from V1's element 0 (operands are commuted relative
  // to what MOVL itself matches).
  if (!isUndefOrEqual(Mask[0], 0))
    return false;

  // The remaining elements come from V2 in order; if V2 is undef any V2
  // index is acceptable, and if V2 is a splat only index NumOps (its
  // element 0) is meaningful.
  for (int i = 1; i < NumOps; ++i)
    if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
      return false;

  return true;
}

static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N) {
  if (N->getValueType(0).getVectorNumElements() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    int Elt = N->getMaskElt(i);
    if (Elt >= 0 && Elt != 1)
      return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    int Elt = N->getMaskElt(i);
    if (Elt >= 0 && Elt != 3)
      return false;
    if (Elt == 3)
      HasHi = true;
  }
  // Don't use movshdup if it can be done with a shufps.
  // FIXME: verify that matching u, u, 3, 3 is what we want.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N) {
  if (N->getValueType(0).getVectorNumElements() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i)
    if (N->getMaskElt(i) > 0)
      return false;

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    int Elt = N->getMaskElt(i);
    if (Elt >= 0 && Elt != 2)
      return false;
    if (Elt == 2)
      HasHi = true;
  }
  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
  int e = N->getValueType(0).getVectorNumElements() / 2;

  // Both halves of the result replicate the low half of the input.
  for (int i = 0; i < e; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i))
      return false;
  for (int i = 0; i < e; ++i)
    if (!isUndefOrEqual(N->getMaskElt(e+i), i))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  int NumOperands = SVOp->getValueType(0).getVectorNumElements();

  // 4 elements use 2 bits each; 2 elements use 1 bit each.
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  // Pack element selectors most-significant first (hence the reversed index).
  for (int i = 0; i < NumOperands; ++i) {
    int Val = SVOp->getMaskElt(NumOperands-i-1);
    if (Val < 0) Val = 0;     // Undef: any selector works; use 0.
    if (Val >= NumOperands) Val -= NumOperands;  // Fold V2 indices to 0..N-1.
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }
  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  // Walk from element 7 down to 4 so element 4 ends up in the low two
  // bits of the immediate. Undef elements (< 0) encode as 0.
  for (unsigned i = 7; i >= 4; --i) {
    int Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      Mask |= (Val - 4); // PSHUFHW indices are relative to the high half.
    if (i != 4)
      Mask <<= 2;
  }
  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  // Walk from element 3 down to 0 so element 0 ends up in the low two
  // bits of the immediate. Undef elements (< 0) encode as 0.
  for (int i = 3; i >= 0; --i) {
    int Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }
  return Mask;
}

/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VVT = N->getValueType(0);
  unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3;
  int Val = 0;

  // Find the first non-undef mask element; the byte rotation amount is
  // the distance between that element's value and its position.
  unsigned i, e;
  for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) {
    Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      break;
  }
  // NOTE(review): if every element is undef, the loop falls through with
  // Val == 0 and i == e, yielding a meaningless immediate — presumably
  // callers only invoke this after a PALIGNR mask match; confirm.
  return (Val - i) * EltSize;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
  // Note: only +0.0 qualifies for FP; -0.0 has a different bit pattern.
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->isNullValue()) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
/// their permute mask.
3355static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 3356 SelectionDAG &DAG) { 3357 EVT VT = SVOp->getValueType(0); 3358 unsigned NumElems = VT.getVectorNumElements(); 3359 SmallVector<int, 8> MaskVec; 3360 3361 for (unsigned i = 0; i != NumElems; ++i) { 3362 int idx = SVOp->getMaskElt(i); 3363 if (idx < 0) 3364 MaskVec.push_back(idx); 3365 else if (idx < (int)NumElems) 3366 MaskVec.push_back(idx + NumElems); 3367 else 3368 MaskVec.push_back(idx - NumElems); 3369 } 3370 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 3371 SVOp->getOperand(0), &MaskVec[0]); 3372} 3373 3374/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3375/// the two vector operands have swapped position. 3376static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) { 3377 unsigned NumElems = VT.getVectorNumElements(); 3378 for (unsigned i = 0; i != NumElems; ++i) { 3379 int idx = Mask[i]; 3380 if (idx < 0) 3381 continue; 3382 else if (idx < (int)NumElems) 3383 Mask[i] = idx + NumElems; 3384 else 3385 Mask[i] = idx - NumElems; 3386 } 3387} 3388 3389/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 3390/// match movhlps. The lower half elements should come from upper half of 3391/// V1 (and in order), and the upper half elements should come from the upper 3392/// half of V2 (and in order). 3393static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) { 3394 if (Op->getValueType(0).getVectorNumElements() != 4) 3395 return false; 3396 for (unsigned i = 0, e = 2; i != e; ++i) 3397 if (!isUndefOrEqual(Op->getMaskElt(i), i+2)) 3398 return false; 3399 for (unsigned i = 2; i != 4; ++i) 3400 if (!isUndefOrEqual(Op->getMaskElt(i), i+4)) 3401 return false; 3402 return true; 3403} 3404 3405/// isScalarLoadToVector - Returns true if the node is a scalar load that 3406/// is promoted to a vector. It also returns the LoadSDNode by reference if 3407/// required. 
static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
  // Only a scalar wrapped by SCALAR_TO_VECTOR qualifies.
  if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
    return false;
  N = N->getOperand(0).getNode();
  // The wrapped scalar must be a plain (non-extending) load.
  if (!ISD::isNON_EXTLoad(N))
    return false;
  if (LD)
    *LD = cast<LoadSDNode>(N);
  return true;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from lower half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
                               ShuffleVectorSDNode *Op) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // load folding shufps op.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Op->getValueType(0).getVectorNumElements();

  // Only 2- and 4-wide vectors map onto movlps/movlpd.
  if (NumElems != 2 && NumElems != 4)
    return false;
  // Lower half: V1's lower half, in order (undef allowed).
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i))
      return false;
  // Upper half: V2's upper half, in order (undef allowed).
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  // Every operand must be identical (same SDValue) to the first.
  SDValue SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
static bool isZeroShuffle(ShuffleVectorSDNode *N) {
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned NumElems = N->getValueType(0).getVectorNumElements();
  // Every selected element must provably be zero: either its source vector
  // is all-zeros/undef, or the selected BUILD_VECTOR operand is a zero node.
  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = N->getMaskElt(i);
    if (Idx >= (int)NumElems) {
      // Element comes from V2.
      unsigned Opc = V2.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
        return false;
    } else if (Idx >= 0) {
      // Element comes from V1. (Idx < 0 means undef: trivially fine.)
      unsigned Opc = V1.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V1.getOperand(Idx)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
                             DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build SSE zero vectors as <4 x i32> bitcasted
  // to their dest type. This ensures they get CSE'd.
  SDValue Vec;
  if (VT.getSizeInBits() == 128) { // SSE
    if (HasSSE2) {  // SSE2
      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
    } else { // SSE1
      // Without SSE2 there is no integer vector xor; use an FP zero.
      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
    }
  } else if (VT.getSizeInBits() == 256) { // AVX
    // 256-bit logic and arithmetic instructions in AVX are
    // all floating-point, no support for integer ops. Default
    // to emitting fp zeroed vectors then.
    SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
    SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
  }
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <4 x i32> bitcasted to the dest type.
  // This ensures they get CSE'd.
  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue Vec;
  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}


/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 points to its first element.
static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  bool Changed = false;
  SmallVector<int, 8> MaskVec;
  SVOp->getMask(MaskVec);

  for (unsigned i = 0; i != NumElems; ++i) {
    // Mask value NumElems already is V2's first element; only strictly
    // greater values need redirecting (V2 is a splat, so any of its
    // elements is equivalent to element 0).
    if (MaskVec[i] > (int)NumElems) {
      MaskVec[i] = NumElems;
      Changed = true;
    }
  }
  // Only build a new shuffle node if something actually changed.
  if (Changed)
    return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0),
                                SVOp->getOperand(1), &MaskVec[0]);
  return SDValue(SVOp, 0);
}

/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
3553static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3554 SDValue V2) { 3555 unsigned NumElems = VT.getVectorNumElements(); 3556 SmallVector<int, 8> Mask; 3557 Mask.push_back(NumElems); 3558 for (unsigned i = 1; i != NumElems; ++i) 3559 Mask.push_back(i); 3560 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3561} 3562 3563/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 3564static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3565 SDValue V2) { 3566 unsigned NumElems = VT.getVectorNumElements(); 3567 SmallVector<int, 8> Mask; 3568 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 3569 Mask.push_back(i); 3570 Mask.push_back(i + NumElems); 3571 } 3572 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3573} 3574 3575/// getUnpackhMask - Returns a vector_shuffle node for an unpackh operation. 3576static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3577 SDValue V2) { 3578 unsigned NumElems = VT.getVectorNumElements(); 3579 unsigned Half = NumElems/2; 3580 SmallVector<int, 8> Mask; 3581 for (unsigned i = 0; i != Half; ++i) { 3582 Mask.push_back(i + Half); 3583 Mask.push_back(i + NumElems + Half); 3584 } 3585 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3586} 3587 3588/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32. 3589static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 3590 EVT PVT = MVT::v4f32; 3591 EVT VT = SV->getValueType(0); 3592 DebugLoc dl = SV->getDebugLoc(); 3593 SDValue V1 = SV->getOperand(0); 3594 int NumElems = VT.getVectorNumElements(); 3595 int EltNo = SV->getSplatIndex(); 3596 3597 // unpack elements to the correct location 3598 while (NumElems > 4) { 3599 if (EltNo < NumElems/2) { 3600 V1 = getUnpackl(DAG, dl, VT, V1, V1); 3601 } else { 3602 V1 = getUnpackh(DAG, dl, VT, V1, V1); 3603 EltNo -= NumElems/2; 3604 } 3605 NumElems >>= 1; 3606 } 3607 3608 // Perform the splat. 
3609 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 3610 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); 3611 V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]); 3612 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1); 3613} 3614 3615/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 3616/// vector of zero or undef vector. This produces a shuffle where the low 3617/// element of V2 is swizzled into the zero/undef vector, landing at element 3618/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 3619static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 3620 bool isZero, bool HasSSE2, 3621 SelectionDAG &DAG) { 3622 EVT VT = V2.getValueType(); 3623 SDValue V1 = isZero 3624 ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 3625 unsigned NumElems = VT.getVectorNumElements(); 3626 SmallVector<int, 16> MaskVec; 3627 for (unsigned i = 0; i != NumElems; ++i) 3628 // If this is the insertion idx, put the low elt of V2 here. 3629 MaskVec.push_back(i == Idx ? NumElems : i); 3630 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 3631} 3632 3633/// getShuffleScalarElt - Returns the scalar element that will make up the ith 3634/// element of the result of the vector shuffle. 3635SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, 3636 unsigned Depth) { 3637 if (Depth == 6) 3638 return SDValue(); // Limit search depth. 3639 3640 SDValue V = SDValue(N, 0); 3641 EVT VT = V.getValueType(); 3642 unsigned Opcode = V.getOpcode(); 3643 3644 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 3645 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 3646 Index = SV->getMaskElt(Index); 3647 3648 if (Index < 0) 3649 return DAG.getUNDEF(VT.getVectorElementType()); 3650 3651 int NumElems = VT.getVectorNumElements(); 3652 SDValue NewV = (Index < NumElems) ? 
SV->getOperand(0) : SV->getOperand(1); 3653 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); 3654 } 3655 3656 // Recurse into target specific vector shuffles to find scalars. 3657 if (isTargetShuffle(Opcode)) { 3658 int NumElems = VT.getVectorNumElements(); 3659 SmallVector<unsigned, 16> ShuffleMask; 3660 SDValue ImmN; 3661 3662 switch(Opcode) { 3663 case X86ISD::SHUFPS: 3664 case X86ISD::SHUFPD: 3665 ImmN = N->getOperand(N->getNumOperands()-1); 3666 DecodeSHUFPSMask(NumElems, 3667 cast<ConstantSDNode>(ImmN)->getZExtValue(), 3668 ShuffleMask); 3669 break; 3670 case X86ISD::PUNPCKHBW: 3671 case X86ISD::PUNPCKHWD: 3672 case X86ISD::PUNPCKHDQ: 3673 case X86ISD::PUNPCKHQDQ: 3674 DecodePUNPCKHMask(NumElems, ShuffleMask); 3675 break; 3676 case X86ISD::UNPCKHPS: 3677 case X86ISD::UNPCKHPD: 3678 DecodeUNPCKHPMask(NumElems, ShuffleMask); 3679 break; 3680 case X86ISD::PUNPCKLBW: 3681 case X86ISD::PUNPCKLWD: 3682 case X86ISD::PUNPCKLDQ: 3683 case X86ISD::PUNPCKLQDQ: 3684 DecodePUNPCKLMask(NumElems, ShuffleMask); 3685 break; 3686 case X86ISD::UNPCKLPS: 3687 case X86ISD::UNPCKLPD: 3688 DecodeUNPCKLPMask(NumElems, ShuffleMask); 3689 break; 3690 case X86ISD::MOVHLPS: 3691 DecodeMOVHLPSMask(NumElems, ShuffleMask); 3692 break; 3693 case X86ISD::MOVLHPS: 3694 DecodeMOVLHPSMask(NumElems, ShuffleMask); 3695 break; 3696 case X86ISD::PSHUFD: 3697 ImmN = N->getOperand(N->getNumOperands()-1); 3698 DecodePSHUFMask(NumElems, 3699 cast<ConstantSDNode>(ImmN)->getZExtValue(), 3700 ShuffleMask); 3701 break; 3702 case X86ISD::PSHUFHW: 3703 ImmN = N->getOperand(N->getNumOperands()-1); 3704 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 3705 ShuffleMask); 3706 break; 3707 case X86ISD::PSHUFLW: 3708 ImmN = N->getOperand(N->getNumOperands()-1); 3709 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 3710 ShuffleMask); 3711 break; 3712 case X86ISD::MOVSS: 3713 case X86ISD::MOVSD: { 3714 // The index 0 always comes from the first element of the 
second source, 3715 // this is why MOVSS and MOVSD are used in the first place. The other 3716 // elements come from the other positions of the first source vector. 3717 unsigned OpNum = (Index == 0) ? 1 : 0; 3718 return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG, 3719 Depth+1); 3720 } 3721 default: 3722 assert("not implemented for target shuffle node"); 3723 return SDValue(); 3724 } 3725 3726 Index = ShuffleMask[Index]; 3727 if (Index < 0) 3728 return DAG.getUNDEF(VT.getVectorElementType()); 3729 3730 SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); 3731 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, 3732 Depth+1); 3733 } 3734 3735 // Actual nodes that may contain scalar elements 3736 if (Opcode == ISD::BIT_CONVERT) { 3737 V = V.getOperand(0); 3738 EVT SrcVT = V.getValueType(); 3739 unsigned NumElems = VT.getVectorNumElements(); 3740 3741 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 3742 return SDValue(); 3743 } 3744 3745 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 3746 return (Index == 0) ? V.getOperand(0) 3747 : DAG.getUNDEF(VT.getVectorElementType()); 3748 3749 if (V.getOpcode() == ISD::BUILD_VECTOR) 3750 return V.getOperand(Index); 3751 3752 return SDValue(); 3753} 3754 3755/// getNumOfConsecutiveZeros - Return the number of elements of a vector 3756/// shuffle operation which come from a consecutively from a zero. The 3757/// search can start in two diferent directions, from left or right. 3758static 3759unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, 3760 bool ZerosFromLeft, SelectionDAG &DAG) { 3761 int i = 0; 3762 3763 while (i < NumElems) { 3764 unsigned Index = ZerosFromLeft ? 
i : NumElems-i-1; 3765 SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0); 3766 if (!(Elt.getNode() && 3767 (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) 3768 break; 3769 ++i; 3770 } 3771 3772 return i; 3773} 3774 3775/// isShuffleMaskConsecutive - Check if the shuffle mask indicies from MaskI to 3776/// MaskE correspond consecutively to elements from one of the vector operands, 3777/// starting from its index OpIdx. Also tell OpNum which source vector operand. 3778static 3779bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE, 3780 int OpIdx, int NumElems, unsigned &OpNum) { 3781 bool SeenV1 = false; 3782 bool SeenV2 = false; 3783 3784 for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) { 3785 int Idx = SVOp->getMaskElt(i); 3786 // Ignore undef indicies 3787 if (Idx < 0) 3788 continue; 3789 3790 if (Idx < NumElems) 3791 SeenV1 = true; 3792 else 3793 SeenV2 = true; 3794 3795 // Only accept consecutive elements from the same vector 3796 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 3797 return false; 3798 } 3799 3800 OpNum = SeenV1 ? 0 : 1; 3801 return true; 3802} 3803 3804/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 3805/// logical left shift of a vector. 3806static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 3807 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 3808 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 3809 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 3810 false /* check zeros from right */, DAG); 3811 unsigned OpSrc; 3812 3813 if (!NumZeros) 3814 return false; 3815 3816 // Considering the elements in the mask that are not consecutive zeros, 3817 // check if they consecutively come from only one of the source vectors. 
3818 // 3819 // V1 = {X, A, B, C} 0 3820 // \ \ \ / 3821 // vector_shuffle V1, V2 <1, 2, 3, X> 3822 // 3823 if (!isShuffleMaskConsecutive(SVOp, 3824 0, // Mask Start Index 3825 NumElems-NumZeros-1, // Mask End Index 3826 NumZeros, // Where to start looking in the src vector 3827 NumElems, // Number of elements in vector 3828 OpSrc)) // Which source operand ? 3829 return false; 3830 3831 isLeft = false; 3832 ShAmt = NumZeros; 3833 ShVal = SVOp->getOperand(OpSrc); 3834 return true; 3835} 3836 3837/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 3838/// logical left shift of a vector. 3839static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 3840 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 3841 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 3842 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 3843 true /* check zeros from left */, DAG); 3844 unsigned OpSrc; 3845 3846 if (!NumZeros) 3847 return false; 3848 3849 // Considering the elements in the mask that are not consecutive zeros, 3850 // check if they consecutively come from only one of the source vectors. 3851 // 3852 // 0 { A, B, X, X } = V2 3853 // / \ / / 3854 // vector_shuffle V1, V2 <X, X, 4, 5> 3855 // 3856 if (!isShuffleMaskConsecutive(SVOp, 3857 NumZeros, // Mask Start Index 3858 NumElems-1, // Mask End Index 3859 0, // Where to start looking in the src vector 3860 NumElems, // Number of elements in vector 3861 OpSrc)) // Which source operand ? 3862 return false; 3863 3864 isLeft = true; 3865 ShAmt = NumZeros; 3866 ShVal = SVOp->getOperand(OpSrc); 3867 return true; 3868} 3869 3870/// isVectorShift - Returns true if the shuffle can be implemented as a 3871/// logical left or right shift of a vector. 
static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  // Try both directions; the out-parameters are set by whichever matches.
  if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
      isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
    return true;

  return false;
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  // NonZeros is a bitmask of which of the 16 operands are non-zero.
  if (NumNonZero > 8)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    // Lazily materialize the base vector on the first non-zero element:
    // all-zeros if any element is known zero, otherwise undef.
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, true, DAG, dl);
      else
        V = DAG.getUNDEF(MVT::v8i16);
      First = false;
    }

    // At every odd byte index, combine bytes i-1 (low) and i (high) into
    // one i16 lane and insert it at position i/2 of the v8i16 vector.
    if ((i & 1) != 0) {
      SDValue ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
                              MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
        // The odd byte occupies the high 8 bits of the i16 lane.
        ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      // Skip the insert entirely when both bytes of this lane are zero/undef.
      if (ThisElt.getNode())
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
                        DAG.getIntPtrConstant(i/2));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  // NonZeros is a bitmask of which of the 8 operands are non-zero.
  if (NumNonZero > 4)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      // Lazily materialize the base vector on the first non-zero element:
      // all-zeros if any element is known zero, otherwise undef.
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, true, DAG, dl);
        else
          V = DAG.getUNDEF(MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
                      MVT::v8i16, V, Op.getOperand(i),
                      DAG.getIntPtrConstant(i));
    }
  }

  return V;
}

/// getVShift - Return a vector logical shift node.
///
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
                         unsigned NumBits, SelectionDAG &DAG,
                         const TargetLowering &TLI, DebugLoc dl) {
  // The shift is performed as a whole-register v2i64 shift (VSHL/VSRL),
  // bitcasting in and out of the requested type.
  EVT ShVT = MVT::v2i64;
  unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
  SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                     DAG.getNode(Opc, dl, ShVT, SrcOp,
                             DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
}

SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                          SelectionDAG &DAG) const {

  // Check if the scalar load can be widened into a vector load. And if
  // the address is "base + cst" see if the cst can be "absorbed" into
  // the shuffle mask.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
    SDValue Ptr = LD->getBasePtr();
    if (!ISD::isNormalLoad(LD) || LD->isVolatile())
      return SDValue();
    EVT PVT = LD->getValueType(0);
    // Only 32-bit scalar loads are handled.
    if (PVT != MVT::i32 && PVT != MVT::f32)
      return SDValue();

    // The address must be a frame index, or frame index + constant offset.
    int FI = -1;
    int64_t Offset = 0;
    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
      FI = FINode->getIndex();
      Offset = 0;
    } else if (Ptr.getOpcode() == ISD::ADD &&
               isa<ConstantSDNode>(Ptr.getOperand(1)) &&
               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
      Offset = Ptr.getConstantOperandVal(1);
      Ptr = Ptr.getOperand(0);
    } else {
      return SDValue();
    }

    SDValue Chain = LD->getChain();
    // Make sure the stack object alignment is at least 16.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    if (DAG.InferPtrAlignment(Ptr) < 16) {
      if (MFI->isFixedObjectIndex(FI)) {
        // Can't change the alignment. FIXME: It's possible to compute
        // the exact stack offset and reference FI + adjust offset instead.
        // If someone *really* cares about this. That's the way to implement it.
        return SDValue();
      } else {
        MFI->setObjectAlignment(FI, 16);
      }
    }

    // (Offset % 16) must be multiple of 4. Then address is then
    // Ptr + (Offset & ~15).
    if (Offset < 0)
      return SDValue();
    if ((Offset % 16) & 3)
      return SDValue();
    int64_t StartOffset = Offset & ~15;
    if (StartOffset)
      Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(),
                        Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));

    // Which of the four loaded 32-bit lanes holds the requested scalar.
    int EltNo = (Offset - StartOffset) >> 2;
    int Mask[4] = { EltNo, EltNo, EltNo, EltNo };
    // NOTE(review): this local VT shadows the VT parameter; callers appear
    // to always pass a 4 x 32-bit VT so the two coincide — confirm.
    EVT VT = (PVT == MVT::i32) ? MVT::v4i32 : MVT::v4f32;
    SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr,
                             LD->getPointerInfo().getWithOffset(StartOffset),
                             false, false, 0);
    // Canonicalize it to a v4i32 shuffle.
    V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, V1);
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                       DAG.getVectorShuffle(MVT::v4i32, dl, V1,
                                            DAG.getUNDEF(MVT::v4i32),&Mask[0]));
  }

  return SDValue();
}

/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
/// vector of type 'VT', see if the elements can be replaced by a single large
/// load which has the same value as a build_vector whose operands are 'elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
                                        DebugLoc &DL, SelectionDAG &DAG) {
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = Elts.size();

  LoadSDNode *LDBase = NULL;
  unsigned LastLoadedElt = -1U;

  // For each element in the initializer, see if we've found a load or an undef.
  // If we don't find an initial load element, or later load elements are
  // non-consecutive, bail out.
4063 for (unsigned i = 0; i < NumElems; ++i) { 4064 SDValue Elt = Elts[i]; 4065 4066 if (!Elt.getNode() || 4067 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4068 return SDValue(); 4069 if (!LDBase) { 4070 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4071 return SDValue(); 4072 LDBase = cast<LoadSDNode>(Elt.getNode()); 4073 LastLoadedElt = i; 4074 continue; 4075 } 4076 if (Elt.getOpcode() == ISD::UNDEF) 4077 continue; 4078 4079 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4080 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4081 return SDValue(); 4082 LastLoadedElt = i; 4083 } 4084 4085 // If we have found an entire vector of loads and undefs, then return a large 4086 // load of the entire vector width starting at the base pointer. If we found 4087 // consecutive loads for the low half, generate a vzext_load node. 4088 if (LastLoadedElt == NumElems - 1) { 4089 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4090 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4091 LDBase->getPointerInfo(), 4092 LDBase->isVolatile(), LDBase->isNonTemporal(), 0); 4093 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4094 LDBase->getPointerInfo(), 4095 LDBase->isVolatile(), LDBase->isNonTemporal(), 4096 LDBase->getAlignment()); 4097 } else if (NumElems == 4 && LastLoadedElt == 1) { 4098 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4099 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4100 SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, 4101 Ops, 2, MVT::i32, 4102 LDBase->getMemOperand()); 4103 return DAG.getNode(ISD::BIT_CONVERT, DL, VT, ResNode); 4104 } 4105 return SDValue(); 4106} 4107 4108SDValue 4109X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 4110 DebugLoc dl = Op.getDebugLoc(); 4111 // All zero's are handled with pxor in SSE2 and above, xorps in SSE1. 4112 // All one's are handled with pcmpeqd. 
In AVX, zero's are handled with 4113 // vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256 version of pcmpeqd 4114 // is present, so AllOnes is ignored. 4115 if (ISD::isBuildVectorAllZeros(Op.getNode()) || 4116 (Op.getValueType().getSizeInBits() != 256 && 4117 ISD::isBuildVectorAllOnes(Op.getNode()))) { 4118 // Canonicalize this to <4 x i32> (SSE) to 4119 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 4120 // eliminated on x86-32 hosts. 4121 if (Op.getValueType() == MVT::v4i32) 4122 return Op; 4123 4124 if (ISD::isBuildVectorAllOnes(Op.getNode())) 4125 return getOnesVector(Op.getValueType(), DAG, dl); 4126 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl); 4127 } 4128 4129 EVT VT = Op.getValueType(); 4130 EVT ExtVT = VT.getVectorElementType(); 4131 unsigned EVTBits = ExtVT.getSizeInBits(); 4132 4133 unsigned NumElems = Op.getNumOperands(); 4134 unsigned NumZero = 0; 4135 unsigned NumNonZero = 0; 4136 unsigned NonZeros = 0; 4137 bool IsAllConstants = true; 4138 SmallSet<SDValue, 8> Values; 4139 for (unsigned i = 0; i < NumElems; ++i) { 4140 SDValue Elt = Op.getOperand(i); 4141 if (Elt.getOpcode() == ISD::UNDEF) 4142 continue; 4143 Values.insert(Elt); 4144 if (Elt.getOpcode() != ISD::Constant && 4145 Elt.getOpcode() != ISD::ConstantFP) 4146 IsAllConstants = false; 4147 if (X86::isZeroNode(Elt)) 4148 NumZero++; 4149 else { 4150 NonZeros |= (1 << i); 4151 NumNonZero++; 4152 } 4153 } 4154 4155 // All undef vector. Return an UNDEF. All zero vectors were handled above. 4156 if (NumNonZero == 0) 4157 return DAG.getUNDEF(VT); 4158 4159 // Special case for single non-zero, non-undef, element. 4160 if (NumNonZero == 1) { 4161 unsigned Idx = CountTrailingZeros_32(NonZeros); 4162 SDValue Item = Op.getOperand(Idx); 4163 4164 // If this is an insertion of an i64 value on x86-32, and if the top bits of 4165 // the value are obviously zero, truncate the value to i32 and do the 4166 // insertion that way. 
Only do this if the value is non-constant or if the 4167 // value is a constant being inserted into element 0. It is cheaper to do 4168 // a constant pool load than it is to do a movd + shuffle. 4169 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 4170 (!IsAllConstants || Idx == 0)) { 4171 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 4172 // Handle SSE only. 4173 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 4174 EVT VecVT = MVT::v4i32; 4175 unsigned VecElts = 4; 4176 4177 // Truncate the value (which may itself be a constant) to i32, and 4178 // convert it to a vector with movd (S2V+shuffle to zero extend). 4179 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 4180 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 4181 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4182 Subtarget->hasSSE2(), DAG); 4183 4184 // Now we have our 32-bit value zero extended in the low element of 4185 // a vector. If Idx != 0, swizzle it into place. 4186 if (Idx != 0) { 4187 SmallVector<int, 4> Mask; 4188 Mask.push_back(Idx); 4189 for (unsigned i = 1; i != VecElts; ++i) 4190 Mask.push_back(i); 4191 Item = DAG.getVectorShuffle(VecVT, dl, Item, 4192 DAG.getUNDEF(Item.getValueType()), 4193 &Mask[0]); 4194 } 4195 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item); 4196 } 4197 } 4198 4199 // If we have a constant or non-constant insertion into the low element of 4200 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 4201 // the rest of the elements. This will be matched as movd/movq/movss/movsd 4202 // depending on what the source datatype is. 4203 if (Idx == 0) { 4204 if (NumZero == 0) { 4205 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4206 } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 4207 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 4208 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4209 // Turn it into a MOVL (i.e. 
movss, movsd, or movd) to a zero vector. 4210 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(), 4211 DAG); 4212 } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 4213 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 4214 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 4215 EVT MiddleVT = MVT::v4i32; 4216 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item); 4217 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4218 Subtarget->hasSSE2(), DAG); 4219 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item); 4220 } 4221 } 4222 4223 // Is it a vector logical left shift? 4224 if (NumElems == 2 && Idx == 1 && 4225 X86::isZeroNode(Op.getOperand(0)) && 4226 !X86::isZeroNode(Op.getOperand(1))) { 4227 unsigned NumBits = VT.getSizeInBits(); 4228 return getVShift(true, VT, 4229 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 4230 VT, Op.getOperand(1)), 4231 NumBits/2, DAG, *this, dl); 4232 } 4233 4234 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 4235 return SDValue(); 4236 4237 // Otherwise, if this is a vector with i32 or f32 elements, and the element 4238 // is a non-constant being inserted into an element other than the low one, 4239 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 4240 // movd/movss) to move this into the low element, then shuffle it into 4241 // place. 4242 if (EVTBits == 32) { 4243 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4244 4245 // Turn it into a shuffle of zero and zero-extended scalar to vector. 4246 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, 4247 Subtarget->hasSSE2(), DAG); 4248 SmallVector<int, 8> MaskVec; 4249 for (unsigned i = 0; i < NumElems; i++) 4250 MaskVec.push_back(i == Idx ? 0 : 1); 4251 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 4252 } 4253 } 4254 4255 // Splat is obviously ok. Let legalizer expand it to a shuffle. 
4256 if (Values.size() == 1) { 4257 if (EVTBits == 32) { 4258 // Instead of a shuffle like this: 4259 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 4260 // Check if it's possible to issue this instead. 4261 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 4262 unsigned Idx = CountTrailingZeros_32(NonZeros); 4263 SDValue Item = Op.getOperand(Idx); 4264 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 4265 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 4266 } 4267 return SDValue(); 4268 } 4269 4270 // A vector full of immediates; various special cases are already 4271 // handled, so this is best done with a single constant-pool load. 4272 if (IsAllConstants) 4273 return SDValue(); 4274 4275 // Let legalizer expand 2-wide build_vectors. 4276 if (EVTBits == 64) { 4277 if (NumNonZero == 1) { 4278 // One half is zero or undef. 4279 unsigned Idx = CountTrailingZeros_32(NonZeros); 4280 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 4281 Op.getOperand(Idx)); 4282 return getShuffleVectorZeroOrUndef(V2, Idx, true, 4283 Subtarget->hasSSE2(), DAG); 4284 } 4285 return SDValue(); 4286 } 4287 4288 // If element VT is < 32 bits, convert it to inserts into a zero vector. 4289 if (EVTBits == 8 && NumElems == 16) { 4290 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 4291 *this); 4292 if (V.getNode()) return V; 4293 } 4294 4295 if (EVTBits == 16 && NumElems == 8) { 4296 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 4297 *this); 4298 if (V.getNode()) return V; 4299 } 4300 4301 // If element VT is == 32 bits, turn it into a number of shuffles. 
4302 SmallVector<SDValue, 8> V; 4303 V.resize(NumElems); 4304 if (NumElems == 4 && NumZero > 0) { 4305 for (unsigned i = 0; i < 4; ++i) { 4306 bool isZero = !(NonZeros & (1 << i)); 4307 if (isZero) 4308 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 4309 else 4310 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4311 } 4312 4313 for (unsigned i = 0; i < 2; ++i) { 4314 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 4315 default: break; 4316 case 0: 4317 V[i] = V[i*2]; // Must be a zero vector. 4318 break; 4319 case 1: 4320 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 4321 break; 4322 case 2: 4323 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 4324 break; 4325 case 3: 4326 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 4327 break; 4328 } 4329 } 4330 4331 SmallVector<int, 8> MaskVec; 4332 bool Reverse = (NonZeros & 0x3) == 2; 4333 for (unsigned i = 0; i < 2; ++i) 4334 MaskVec.push_back(Reverse ? 1-i : i); 4335 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 4336 for (unsigned i = 0; i < 2; ++i) 4337 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems); 4338 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 4339 } 4340 4341 if (Values.size() > 1 && VT.getSizeInBits() == 128) { 4342 // Check for a build vector of consecutive loads. 4343 for (unsigned i = 0; i < NumElems; ++i) 4344 V[i] = Op.getOperand(i); 4345 4346 // Check for elements which are consecutive loads. 4347 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 4348 if (LD.getNode()) 4349 return LD; 4350 4351 // For SSE 4.1, use insertps to put the high elements into the low element. 
4352 if (getSubtarget()->hasSSE41()) { 4353 SDValue Result; 4354 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 4355 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 4356 else 4357 Result = DAG.getUNDEF(VT); 4358 4359 for (unsigned i = 1; i < NumElems; ++i) { 4360 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 4361 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 4362 Op.getOperand(i), DAG.getIntPtrConstant(i)); 4363 } 4364 return Result; 4365 } 4366 4367 // Otherwise, expand into a number of unpckl*, start by extending each of 4368 // our (non-undef) elements to the full vector width with the element in the 4369 // bottom slot of the vector (which generates no code for SSE). 4370 for (unsigned i = 0; i < NumElems; ++i) { 4371 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 4372 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4373 else 4374 V[i] = DAG.getUNDEF(VT); 4375 } 4376 4377 // Next, we iteratively mix elements, e.g. for v4f32: 4378 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 4379 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 4380 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 4381 unsigned EltStride = NumElems >> 1; 4382 while (EltStride != 0) { 4383 for (unsigned i = 0; i < EltStride; ++i) { 4384 // If V[i+EltStride] is undef and this is the first round of mixing, 4385 // then it is safe to just drop this shuffle: V[i] is already in the 4386 // right place, the one element (since it's the first round) being 4387 // inserted as undef can be dropped. This isn't safe for successive 4388 // rounds because they will permute elements within both vectors. 
        if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
            EltStride == NumElems/2)
          continue;

        // Mix this pair of sub-vectors with an unpckl, halving the number of
        // live vectors each round.
        V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
      }
      EltStride >>= 1;
    }
    return V[0];
  }
  return SDValue();
}

/// LowerCONCAT_VECTORS - Lower a concatenation of two 64-bit (MMX) values
/// into a single 128-bit register, using MOVQ2DQ instead of going through
/// a stack slot.
SDValue
X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  // We support concatenate two MMX registers and place them in a MMX
  // register.  This is better than doing a stack convert.
  DebugLoc dl = Op.getDebugLoc();
  EVT ResVT = Op.getValueType();
  assert(Op.getNumOperands() == 2);
  assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
         ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
  int Mask[2];
  // Move the first 64-bit operand into the low half of an XMM register.
  SDValue InVec = DAG.getNode(ISD::BIT_CONVERT,dl, MVT::v1i64, Op.getOperand(0));
  SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
  InVec = Op.getOperand(1);
  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Second operand is a single scalar: insert it directly into the upper
    // half of the result vector.
    // NOTE(review): NumElts/2 is the first element of the upper half; the
    // "+1" here looks like it may be off by one — confirm the intended
    // insertion index.
    unsigned NumElts = ResVT.getVectorNumElements();
    VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
    VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
                       InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
  } else {
    // General case: move the second 64-bit value into another XMM register
    // and combine the two low quadwords with a shuffle <0, 2>.
    InVec = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v1i64, InVec);
    SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
    Mask[0] = 0; Mask[1] = 2;
    VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
  }
  return DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
}

// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all]   pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4.
//    [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)

/// LowerVECTOR_SHUFFLEv8i16 - Lower a v8i16 shuffle.  Mask indices 0-7 refer
/// to words of V1, 8-15 to words of V2, and negative values are undef.
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
                                            SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs.  Undef
  // mask values count as coming from any quadword, for better codegen.
  SmallVector<unsigned, 4> LoQuad(4);
  SmallVector<unsigned, 4> HiQuad(4);
  BitVector InputQuads(4);
  for (unsigned i = 0; i < 8; ++i) {
    SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      // Undef counts toward every quadword.
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }

  // Pick the input quadword that supplies the most words of the result's
  // low half (must supply at least 2 to be worthwhile).
  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  // Same for the result's high half.
  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, If all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If There are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads.find_first();
      BestHiQuad = InputQuads.find_next(BestLoQuad);
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }

  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask.  If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    // Gather the two chosen quadwords with a v2i64 shuffle.
    SmallVector<int, 8> MaskV;
    MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
    MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      // Remap mask entries to refer into NewV: the BestLoQuad words now
      // occupy indices 0-3 and the BestHiQuad words indices 4-7.
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets.  Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()):
                             X86::getShufflePSHUFLWImmediate(NewV.getNode());
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated.  If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    for (unsigned i = 0; i != 8; ++i) {
      // Each word becomes two consecutive byte indices in the pshufb mask.
      int EltIdx = MaskVals[i] * 2;
      if (TwoInputs && (EltIdx >= 16)) {
        // 0x80 in a pshufb control byte zeroes the destination byte.
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
    }
    V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1);
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (!TwoInputs)
      return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 8; ++i) {
      int EltIdx = MaskVals[i] * 2;
      if (EltIdx < 16) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
    }
    V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2);
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
  }

  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  BitVector InOrder(8);
  if (BestLoQuad >= 0) {
    SmallVector<int, 8> MaskV;
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        MaskV.push_back(-1);
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV.push_back(idx & 3);
        InOrder.set(i);
      } else {
        MaskV.push_back(-1);
      }
    }
    // pshuflw leaves the high half untouched.
    for (unsigned i = 4; i != 8; ++i)
      MaskV.push_back(i);
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                               NewV.getOperand(0),
                               X86::getShufflePSHUFLWImmediate(NewV.getNode()),
                               DAG);
  }

  // If BestHi >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    SmallVector<int, 8> MaskV;
    // pshufhw leaves the low half untouched.
    for (unsigned i = 0; i != 4; ++i)
      MaskV.push_back(i);
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        MaskV.push_back(-1);
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV.push_back((idx & 3) + 4);
        InOrder.set(i);
      } else {
        MaskV.push_back(-1);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                              NewV.getOperand(0),
                              X86::getShufflePSHUFHWImmediate(NewV.getNode()),
                              DAG);
  }

  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8)
    ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx))
    : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

// v16i8 shuffles - Prefer shuffles in the following order:
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
// 3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static
SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG,
                                 const X86TargetLowering &TLI) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  SmallVector<int, 16> MaskVals;
  SVOp->getMask(MaskVals);

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs.  Otherwise, case 2 is generated.  If no SSSE3 is
  // present, fall back to case 3.
  // FIXME: kill V2Only once shuffles are canonizalized by getNode.
  bool V1Only = true;
  bool V2Only = true;
  for (unsigned i = 0; i < 16; ++i) {
    // Mask indices 0-15 refer to V1 bytes, 16-31 to V2 bytes, <0 is undef.
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    if (EltIdx < 16)
      V2Only = false;
    else
      V1Only = false;
  }

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (TLI.getSubtarget()->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    bool TwoInputs = !(V1Only || V2Only);
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
        // 0x80 in a pshufb control byte zeroes the destination byte.
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    // If all the elements are from V2, assign it to V1 and return after
    // building the first pshufb.
    if (V2Only)
      V1 = V2;
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (!TwoInputs)
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 16) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }

  // No SSSE3 - Calculate in place words and then fix all out of place words
  // With 0-16 extracts & inserts.  Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
  SDValue NewV = V2Only ? V2 : V1;
  // Process the result one 16-bit word (i.e. one byte pair) at a time.
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
      continue;
    if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, are consecutive, and can be load
    // using a single extract together, load it and store it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source.  If the
    // source byte is not also odd, shift the extracted word left 8 bits
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8, TLI.getShiftAmountTy()));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source.  If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8, TLI.getShiftAmountTy()));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG, DebugLoc dl) {
  EVT VT = SVOp->getValueType(0);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();
  unsigned NewWidth = (NumElems == 4) ? 2 : 4;
  EVT NewVT;
  switch (VT.getSimpleVT().SimpleTy) {
  default: assert(false && "Unexpected!");
  case MVT::v4f32: NewVT = MVT::v2f64; break;
  case MVT::v4i32: NewVT = MVT::v2i64; break;
  case MVT::v8i16: NewVT = MVT::v4i32; break;
  case MVT::v16i8: NewVT = MVT::v4i32; break;
  }

  // Each new mask element covers Scale consecutive old elements; they must
  // form an aligned, in-order run, otherwise the rewrite is not possible.
  int Scale = NumElems / NewWidth;
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i < NumElems; i += Scale) {
    int StartIdx = -1;
    for (int j = 0; j < Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx == -1)
        StartIdx = EltIdx - (EltIdx % Scale);
      if (EltIdx != StartIdx + j)
        return SDValue();
    }
    if (StartIdx == -1)
      MaskVec.push_back(-1);
    else
      MaskVec.push_back(StartIdx / Scale);
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
  V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(EVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ?
MVT::v2i64 : MVT::v4i32; 4902 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 4903 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 4904 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 4905 OpVT, 4906 SrcOp.getOperand(0) 4907 .getOperand(0)))); 4908 } 4909 } 4910 } 4911 4912 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 4913 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 4914 DAG.getNode(ISD::BIT_CONVERT, dl, 4915 OpVT, SrcOp))); 4916} 4917 4918/// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of 4919/// shuffles. 4920static SDValue 4921LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 4922 SDValue V1 = SVOp->getOperand(0); 4923 SDValue V2 = SVOp->getOperand(1); 4924 DebugLoc dl = SVOp->getDebugLoc(); 4925 EVT VT = SVOp->getValueType(0); 4926 4927 SmallVector<std::pair<int, int>, 8> Locs; 4928 Locs.resize(4); 4929 SmallVector<int, 8> Mask1(4U, -1); 4930 SmallVector<int, 8> PermMask; 4931 SVOp->getMask(PermMask); 4932 4933 unsigned NumHi = 0; 4934 unsigned NumLo = 0; 4935 for (unsigned i = 0; i != 4; ++i) { 4936 int Idx = PermMask[i]; 4937 if (Idx < 0) { 4938 Locs[i] = std::make_pair(-1, -1); 4939 } else { 4940 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 4941 if (Idx < 4) { 4942 Locs[i] = std::make_pair(0, NumLo); 4943 Mask1[NumLo] = Idx; 4944 NumLo++; 4945 } else { 4946 Locs[i] = std::make_pair(1, NumHi); 4947 if (2+NumHi < 4) 4948 Mask1[2+NumHi] = Idx; 4949 NumHi++; 4950 } 4951 } 4952 } 4953 4954 if (NumLo <= 2 && NumHi <= 2) { 4955 // If no more than two elements come from either vector. This can be 4956 // implemented with two shuffles. First shuffle gather the elements. 4957 // The second shuffle, which takes the first shuffle as both of its 4958 // vector operands, put the elements into the right order. 
4959 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 4960 4961 SmallVector<int, 8> Mask2(4U, -1); 4962 4963 for (unsigned i = 0; i != 4; ++i) { 4964 if (Locs[i].first == -1) 4965 continue; 4966 else { 4967 unsigned Idx = (i < 2) ? 0 : 4; 4968 Idx += Locs[i].first * 2 + Locs[i].second; 4969 Mask2[i] = Idx; 4970 } 4971 } 4972 4973 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 4974 } else if (NumLo == 3 || NumHi == 3) { 4975 // Otherwise, we must have three elements from one vector, call it X, and 4976 // one element from the other, call it Y. First, use a shufps to build an 4977 // intermediate vector with the one element from Y and the element from X 4978 // that will be in the same half in the final destination (the indexes don't 4979 // matter). Then, use a shufps to build the final vector, taking the half 4980 // containing the element from Y from the intermediate, and the other half 4981 // from X. 4982 if (NumHi == 3) { 4983 // Normalize it so the 3 elements come from V1. 4984 CommuteVectorShuffleMask(PermMask, VT); 4985 std::swap(V1, V2); 4986 } 4987 4988 // Find the element from V2. 4989 unsigned HiIndex; 4990 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 4991 int Val = PermMask[HiIndex]; 4992 if (Val < 0) 4993 continue; 4994 if (Val >= 4) 4995 break; 4996 } 4997 4998 Mask1[0] = PermMask[HiIndex]; 4999 Mask1[1] = -1; 5000 Mask1[2] = PermMask[HiIndex^1]; 5001 Mask1[3] = -1; 5002 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 5003 5004 if (HiIndex >= 2) { 5005 Mask1[0] = PermMask[0]; 5006 Mask1[1] = PermMask[1]; 5007 Mask1[2] = HiIndex & 1 ? 6 : 4; 5008 Mask1[3] = HiIndex & 1 ? 4 : 6; 5009 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 5010 } else { 5011 Mask1[0] = HiIndex & 1 ? 2 : 0; 5012 Mask1[1] = HiIndex & 1 ? 
0 : 2; 5013 Mask1[2] = PermMask[2]; 5014 Mask1[3] = PermMask[3]; 5015 if (Mask1[2] >= 0) 5016 Mask1[2] += 4; 5017 if (Mask1[3] >= 0) 5018 Mask1[3] += 4; 5019 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 5020 } 5021 } 5022 5023 // Break it into (shuffle shuffle_hi, shuffle_lo). 5024 Locs.clear(); 5025 SmallVector<int,8> LoMask(4U, -1); 5026 SmallVector<int,8> HiMask(4U, -1); 5027 5028 SmallVector<int,8> *MaskPtr = &LoMask; 5029 unsigned MaskIdx = 0; 5030 unsigned LoIdx = 0; 5031 unsigned HiIdx = 2; 5032 for (unsigned i = 0; i != 4; ++i) { 5033 if (i == 2) { 5034 MaskPtr = &HiMask; 5035 MaskIdx = 1; 5036 LoIdx = 0; 5037 HiIdx = 2; 5038 } 5039 int Idx = PermMask[i]; 5040 if (Idx < 0) { 5041 Locs[i] = std::make_pair(-1, -1); 5042 } else if (Idx < 4) { 5043 Locs[i] = std::make_pair(MaskIdx, LoIdx); 5044 (*MaskPtr)[LoIdx] = Idx; 5045 LoIdx++; 5046 } else { 5047 Locs[i] = std::make_pair(MaskIdx, HiIdx); 5048 (*MaskPtr)[HiIdx] = Idx; 5049 HiIdx++; 5050 } 5051 } 5052 5053 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 5054 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 5055 SmallVector<int, 8> MaskOps; 5056 for (unsigned i = 0; i != 4; ++i) { 5057 if (Locs[i].first == -1) { 5058 MaskOps.push_back(-1); 5059 } else { 5060 unsigned Idx = Locs[i].first * 4 + Locs[i].second; 5061 MaskOps.push_back(Idx); 5062 } 5063 } 5064 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 5065} 5066 5067static bool MayFoldVectorLoad(SDValue V) { 5068 if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT) 5069 V = V.getOperand(0); 5070 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 5071 V = V.getOperand(0); 5072 if (MayFoldLoad(V)) 5073 return true; 5074 return false; 5075} 5076 5077// FIXME: the version above should always be used. 
Since there's
// a bug where several vector shuffles can't be folded because the
// DAG is not updated during lowering and a node claims to have two
// uses while it only has one, use this version, and let isel match
// another instruction if the load really happens to have more than
// one use. Remove this version after this bug gets fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
  // Look through a one-use bitcast and/or scalar_to_vector to find the
  // underlying value, then test whether it is a plain (non-extending,
  // non-indexed) load.
  if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (ISD::isNormalLoad(V.getNode()))
    return true;
  return false;
}

/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by
/// a vector extract, and if both can be later optimized into a single load.
/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked
/// here because otherwise a target specific shuffle node is going to be
/// emitted for this shuffle, and the optimization not done.
/// FIXME: This is probably not the best approach, but fix the problem
/// until the right path is decided.
static
bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
                                         const TargetLowering &TLI) {
  EVT VT = V.getValueType();
  ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);

  // Be sure that the vector shuffle is present in a pattern like this:
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
  if (!V.hasOneUse())
    return false;

  // The only use must be an extract of a constant element.
  SDNode *N = *V.getNode()->use_begin();
  if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;

  SDValue EltNo = N->getOperand(1);
  if (!isa<ConstantSDNode>(EltNo))
    return false;

  // If the bit convert changed the number of elements, it is unsafe
  // to examine the mask.
  bool HasShuffleIntoBitcast = false;
  if (V.getOpcode() == ISD::BIT_CONVERT) {
    EVT SrcVT = V.getOperand(0).getValueType();
    if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
      return false;
    V = V.getOperand(0);
    HasShuffleIntoBitcast = true;
  }

  // Select the input vector, guarding against out of range extract vector.
  // NOTE(review): the guard uses 'Elt > NumElems', which still lets
  // Elt == NumElems through to getMaskElt; valid mask indices are
  // 0..NumElems-1, so this looks like an off-by-one — confirm.
  unsigned NumElems = VT.getVectorNumElements();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
  V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);

  // Skip one more bit_convert if necessary
  if (V.getOpcode() == ISD::BIT_CONVERT)
    V = V.getOperand(0);

  if (ISD::isNormalLoad(V.getNode())) {
    // Is the original load suitable?
    LoadSDNode *LN0 = cast<LoadSDNode>(V);

    // FIXME: avoid the multi-use bug that is preventing lots of
    // foldings to be detected, this is still wrong of course, but
    // give the temporary desired behavior, and if it happens that
    // the load really has more uses, during isel it will not fold, and
    // will generate poor code.
    if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
      return false;

    if (!HasShuffleIntoBitcast)
      return true;

    // If there's a bitcast before the shuffle, check if the load type and
    // alignment is valid.
    unsigned Align = LN0->getAlignment();
    unsigned NewAlign =
      TLI.getTargetData()->getABITypeAlignment(
                                    VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
      return false;
  }

  return true;
}

/// getMOVDDup - Emit a MOVDDUP target shuffle node, bitcasting through
/// v2f64 (the only type the node is defined on) and back to VT.
static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

/// getMOVLowToHigh - Emit the "move low half to high half" target node:
/// MOVLHPD for v2f64 (with SSE2), MOVLHPS otherwise (v4f32/v4i32).
static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32
  return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG);
}

/// getMOVHighToLow - Emit a MOVHLPS target node for a 4-wide shuffle.
/// An undef V2 is replaced by V1 so both operands are always defined.
static
SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

/// getMOVLP - Match a "movlp"-style shuffle, preferring MOVLPS/MOVLPD when a
/// load can be folded, and falling back to MOVSD/MOVSS/SHUFPS otherwise.
static
SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  // turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp load
  // folding logic (see the code above getMOVLP call). Match it here then,
  // this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp))
    return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
  else if (HasSSE2)
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);


  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
5262 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1, 5263 X86::getShuffleSHUFImmediate(SVOp), DAG); 5264} 5265 5266static inline unsigned getUNPCKLOpcode(EVT VT) { 5267 switch(VT.getSimpleVT().SimpleTy) { 5268 case MVT::v4i32: return X86ISD::PUNPCKLDQ; 5269 case MVT::v2i64: return X86ISD::PUNPCKLQDQ; 5270 case MVT::v4f32: return X86ISD::UNPCKLPS; 5271 case MVT::v2f64: return X86ISD::UNPCKLPD; 5272 case MVT::v16i8: return X86ISD::PUNPCKLBW; 5273 case MVT::v8i16: return X86ISD::PUNPCKLWD; 5274 default: 5275 llvm_unreachable("Unknow type for unpckl"); 5276 } 5277 return 0; 5278} 5279 5280static inline unsigned getUNPCKHOpcode(EVT VT) { 5281 switch(VT.getSimpleVT().SimpleTy) { 5282 case MVT::v4i32: return X86ISD::PUNPCKHDQ; 5283 case MVT::v2i64: return X86ISD::PUNPCKHQDQ; 5284 case MVT::v4f32: return X86ISD::UNPCKHPS; 5285 case MVT::v2f64: return X86ISD::UNPCKHPD; 5286 case MVT::v16i8: return X86ISD::PUNPCKHBW; 5287 case MVT::v8i16: return X86ISD::PUNPCKHWD; 5288 default: 5289 llvm_unreachable("Unknow type for unpckh"); 5290 } 5291 return 0; 5292} 5293 5294static 5295SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG, 5296 const TargetLowering &TLI, 5297 const X86Subtarget *Subtarget) { 5298 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5299 EVT VT = Op.getValueType(); 5300 DebugLoc dl = Op.getDebugLoc(); 5301 SDValue V1 = Op.getOperand(0); 5302 SDValue V2 = Op.getOperand(1); 5303 5304 if (isZeroShuffle(SVOp)) 5305 return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 5306 5307 // Handle splat operations 5308 if (SVOp->isSplat()) { 5309 // Special case, this is the only place now where it's 5310 // allowed to return a vector_shuffle operation without 5311 // using a target specific node, because *hopefully* it 5312 // will be optimized away by the dag combiner. 
    if (VT.getVectorNumElements() <= 4 &&
        CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
      return Op;

    // Handle splats by matching through known masks
    if (VT.getVectorNumElements() <= 4)
      return SDValue();

    // Canonicalize all of the remaining to v4f32.
    return PromoteSplat(SVOp, DAG);
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BIT_CONVERT, dl, VT, NewOp);
  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
          return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
                              DAG, Subtarget, dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
        return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
                            DAG, Subtarget, dl);
    }
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned NumElems = VT.getVectorNumElements();
  bool isMMX = VT.getSizeInBits() == 64;
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  // AVX implies the SSE feature levels below for matching purposes.
  bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
  bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
  bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  // Shuffle operations on MMX not supported.
  if (isMMX)
    return Op;

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    shuffle nodes.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel, all of them must be converted to a target specific
  // node here.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
  if (NewOp.getNode())
    return NewOp;

  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
  if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
    if (VT != MVT::v2i64 && VT != MVT::v2f64)
      return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
  if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
    if (VT != MVT::v2i64 && VT != MVT::v2f64)
      return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  // MOVDDUP is only worthwhile when the load it duplicates can be folded.
  if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
      RelaxedMayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (X86::isMOVHLPS_v_undef_Mask(SVOp))
    return getMOVHighToLow(Op, dl, DAG);

  // Use to match splats
  if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  if (X86::isPSHUFDMask(SVOp)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only pshufd
    // as its name says, sad but true, emulate the behavior for now...
    if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1,
                                  TargetMask, DAG);

    if (VT == MVT::v4f32)
      return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1,
                                  TargetMask, DAG);
  }

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = getSubtarget()->hasSSE2() &&
    isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (X86::isMOVLMask(SVOp)) {
    if (V1IsUndef)
      return V2;
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!X86::isMOVLPMask(SVOp)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (X86::isMOVHLPSMask(SVOp))
    return getMOVHighToLow(Op, dl, DAG);

  if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (X86::isMOVLPMask(SVOp))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(SVOp) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshl / vsrl.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat?  Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(SVOp, DAG);
    SVOp = cast<ShuffleVectorSDNode>(Op);
    V1 = SVOp->getOperand(0);
    V2 = SVOp->getOperand(1);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
    // Shuffling low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (X86::isUNPCKLMask(SVOp))
    return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);

  if (X86::isUNPCKHMask(SVOp))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize mask so all entries that point to V2 points to its first
    // element then try to match unpck{h|l} again. If match, return a
    // new vector_shuffle with the corrected mask.
    SDValue NewMask = NormalizeMask(SVOp, DAG);
    ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
    if (NSVOp != SVOp) {
      if (X86::isUNPCKLMask(NSVOp, true)) {
        return NewMask;
      } else if (X86::isUNPCKHMask(NSVOp, true)) {
        return NewMask;
      }
    }
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
    ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);

    if (X86::isUNPCKLMask(NewSVOp))
      return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);

    if (X86::isUNPCKHMask(NewSVOp))
      return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target specific
  // nodes, and remove one by one until they don't return Op anymore.
  SmallVector<int, 16> M;
  SVOp->getMask(M);

  if (isPALIGNRMask(M, VT, HasSSSE3))
    return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
                                X86::getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64)
      return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG);
    if (VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                X86::getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                X86::getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  if (isSHUFPMask(M, VT)) {
    unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
    if (VT == MVT::v4f32 || VT == MVT::v4i32)
      return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2,
                                  TargetMask, DAG);
    if (VT == MVT::v2f64 || VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2,
                                  TargetMask, DAG);
  }

  // Same as the OptForSize checks at the top, but now unconditional.
  if (X86::isUNPCKL_v_undef_Mask(SVOp))
    if (VT != MVT::v2i64 && VT != MVT::v2f64)
      return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
  if (X86::isUNPCKH_v_undef_Mask(SVOp))
    if (VT != MVT::v2i64 && VT != MVT::v2f64)
      return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 4 wide cases with a number of shuffles.
  if (NumElems == 4)
    return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG);

  return SDValue();
}

/// LowerEXTRACT_VECTOR_ELT_SSE4 - Lower element extraction using the SSE4.1
/// PEXTRB/PEXTRW/EXTRACTPS forms. Returns a null SDValue when no SSE4
/// pattern applies and the caller should fall back to the generic path.
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  if (VT.getSizeInBits() == 8) {
    // PEXTRB produces an i32; assert the upper bits are zero and truncate.
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, dl,
                                                 MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32.  And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    // Bail unless the single use is a store of a non-zero index, or a
    // bitcast to i32 (see the rationale in the comment above).
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BIT_CONVERT ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
  } else if (VT == MVT::i32) {
    // ExtractPS works with constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}


/// LowerEXTRACT_VECTOR_ELT - Lower element extraction for all SSE levels.
/// Only constant indices are handled; variable indices are left to the
/// generic legalizer.
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it match pextrw which produces a 32-bit result.
    EVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { Idx, -1, -1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  } else if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
5732 int Mask[2] = { 1, -1 }; 5733 EVT VVT = Op.getOperand(0).getValueType(); 5734 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 5735 DAG.getUNDEF(VVT), Mask); 5736 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 5737 DAG.getIntPtrConstant(0)); 5738 } 5739 5740 return SDValue(); 5741} 5742 5743SDValue 5744X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 5745 SelectionDAG &DAG) const { 5746 EVT VT = Op.getValueType(); 5747 EVT EltVT = VT.getVectorElementType(); 5748 DebugLoc dl = Op.getDebugLoc(); 5749 5750 SDValue N0 = Op.getOperand(0); 5751 SDValue N1 = Op.getOperand(1); 5752 SDValue N2 = Op.getOperand(2); 5753 5754 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 5755 isa<ConstantSDNode>(N2)) { 5756 unsigned Opc; 5757 if (VT == MVT::v8i16) 5758 Opc = X86ISD::PINSRW; 5759 else if (VT == MVT::v16i8) 5760 Opc = X86ISD::PINSRB; 5761 else 5762 Opc = X86ISD::PINSRB; 5763 5764 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 5765 // argument. 5766 if (N1.getValueType() != MVT::i32) 5767 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 5768 if (N2.getValueType() != MVT::i32) 5769 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 5770 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 5771 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 5772 // Bits [7:6] of the constant are the source select. This will always be 5773 // zero here. The DAG Combiner may combine an extract_elt index into these 5774 // bits. For example (insert (extract, 3), 2) could be matched by putting 5775 // the '3' into bits [7:6] of X86ISD::INSERTPS. 5776 // Bits [5:4] of the constant are the destination select. This is the 5777 // value of the incoming immediate. 5778 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 5779 // combine either bitwise AND or insert of float 0.0 to set these bits. 
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    // Create this as a scalar to vector..
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
    // PINSR* works with constant index.
    return Op;
  }
  return SDValue();
}

/// LowerINSERT_VECTOR_ELT - Lower element insertion, dispatching to the
/// SSE4.1 path when available and falling back to PINSRW for 16-bit
/// elements with a constant index.
SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  // No pre-SSE4.1 byte insertion instruction.
  if (EltVT == MVT::i8)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it match pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

/// LowerSCALAR_TO_VECTOR - Lower a scalar-to-vector node; v1i64 from i64 is
/// kept as-is, everything else is built as a v4i32 insert of the
/// any-extended scalar and bitcast to the result type.
SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getValueType() == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
         "Expected an SSE type!");
  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  // Pick the wrapper node and relocation flag matching the PIC style.
  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  DebugLoc DL = Op.getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);


  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddressAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = getTargetMachine().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
                                       /*isTarget=*/true, OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                      int64_t Offset,
                                      SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
  CodeModel::Model M = getTargetMachine().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}

/// GetTLSADDR - Build the X86ISD::TLSADDR call-like node that computes a TLS
/// address in ReturnReg, optionally glued to InFlag when non-null.
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  DebugLoc dl = GA->getDebugLoc();
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  if (InFlag) {
    SDValue Ops[] = { Chain,  TGA, *InFlag };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
  } else {
    SDValue Ops[]  = { Chain, TGA };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
  }

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
6044 MFI->setAdjustsStack(true); 6045 6046 SDValue Flag = Chain.getValue(1); 6047 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 6048} 6049 6050// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 6051static SDValue 6052LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6053 const EVT PtrVT) { 6054 SDValue InFlag; 6055 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better 6056 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 6057 DAG.getNode(X86ISD::GlobalBaseReg, 6058 DebugLoc(), PtrVT), InFlag); 6059 InFlag = Chain.getValue(1); 6060 6061 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 6062} 6063 6064// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 6065static SDValue 6066LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6067 const EVT PtrVT) { 6068 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 6069 X86::RAX, X86II::MO_TLSGD); 6070} 6071 6072// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 6073// "local exec" model. 6074static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6075 const EVT PtrVT, TLSModel::Model model, 6076 bool is64Bit) { 6077 DebugLoc dl = GA->getDebugLoc(); 6078 6079 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 6080 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 6081 is64Bit ? 257 : 256)); 6082 6083 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 6084 DAG.getIntPtrConstant(0), 6085 MachinePointerInfo(Ptr), false, false, 0); 6086 6087 unsigned char OperandFlags = 0; 6088 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 6089 // initialexec. 6090 unsigned WrapperKind = X86ISD::Wrapper; 6091 if (model == TLSModel::LocalExec) { 6092 OperandFlags = is64Bit ? 
X86II::MO_TPOFF : X86II::MO_NTPOFF; 6093 } else if (is64Bit) { 6094 assert(model == TLSModel::InitialExec); 6095 OperandFlags = X86II::MO_GOTTPOFF; 6096 WrapperKind = X86ISD::WrapperRIP; 6097 } else { 6098 assert(model == TLSModel::InitialExec); 6099 OperandFlags = X86II::MO_INDNTPOFF; 6100 } 6101 6102 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 6103 // exec) 6104 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 6105 GA->getValueType(0), 6106 GA->getOffset(), OperandFlags); 6107 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 6108 6109 if (model == TLSModel::InitialExec) 6110 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 6111 MachinePointerInfo::getGOT(), false, false, 0); 6112 6113 // The address of the thread local variable is the add of the thread 6114 // pointer with the offset of the variable. 6115 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 6116} 6117 6118SDValue 6119X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 6120 6121 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 6122 const GlobalValue *GV = GA->getGlobal(); 6123 6124 if (Subtarget->isTargetELF()) { 6125 // TODO: implement the "local dynamic" model 6126 // TODO: implement the "initial exec"model for pic executables 6127 6128 // If GV is an alias then use the aliasee for determining 6129 // thread-localness. 
6130 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 6131 GV = GA->resolveAliasedGlobal(false); 6132 6133 TLSModel::Model model 6134 = getTLSModel(GV, getTargetMachine().getRelocationModel()); 6135 6136 switch (model) { 6137 case TLSModel::GeneralDynamic: 6138 case TLSModel::LocalDynamic: // not implemented 6139 if (Subtarget->is64Bit()) 6140 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 6141 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 6142 6143 case TLSModel::InitialExec: 6144 case TLSModel::LocalExec: 6145 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 6146 Subtarget->is64Bit()); 6147 } 6148 } else if (Subtarget->isTargetDarwin()) { 6149 // Darwin only has one model of TLS. Lower to that. 6150 unsigned char OpFlag = 0; 6151 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 6152 X86ISD::WrapperRIP : X86ISD::Wrapper; 6153 6154 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 6155 // global base reg. 6156 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 6157 !Subtarget->is64Bit(); 6158 if (PIC32) 6159 OpFlag = X86II::MO_TLVP_PIC_BASE; 6160 else 6161 OpFlag = X86II::MO_TLVP; 6162 DebugLoc DL = Op.getDebugLoc(); 6163 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 6164 getPointerTy(), 6165 GA->getOffset(), OpFlag); 6166 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 6167 6168 // With PIC32, the address is actually $g + Offset. 6169 if (PIC32) 6170 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 6171 DAG.getNode(X86ISD::GlobalBaseReg, 6172 DebugLoc(), getPointerTy()), 6173 Offset); 6174 6175 // Lowering the machine isd will make sure everything is in the right 6176 // location. 6177 SDValue Args[] = { Offset }; 6178 SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1); 6179 6180 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
6181 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6182 MFI->setAdjustsStack(true); 6183 6184 // And our return value (tls address) is in the standard call return value 6185 // location. 6186 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 6187 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); 6188 } 6189 6190 assert(false && 6191 "TLS not implemented for this target."); 6192 6193 llvm_unreachable("Unreachable"); 6194 return SDValue(); 6195} 6196 6197 6198/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 6199/// take a 2 x i32 value to shift plus a shift amount. 6200SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 6201 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 6202 EVT VT = Op.getValueType(); 6203 unsigned VTBits = VT.getSizeInBits(); 6204 DebugLoc dl = Op.getDebugLoc(); 6205 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 6206 SDValue ShOpLo = Op.getOperand(0); 6207 SDValue ShOpHi = Op.getOperand(1); 6208 SDValue ShAmt = Op.getOperand(2); 6209 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 6210 DAG.getConstant(VTBits - 1, MVT::i8)) 6211 : DAG.getConstant(0, VT); 6212 6213 SDValue Tmp2, Tmp3; 6214 if (Op.getOpcode() == ISD::SHL_PARTS) { 6215 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 6216 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 6217 } else { 6218 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 6219 Tmp3 = DAG.getNode(isSRA ? 
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 6220 } 6221 6222 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 6223 DAG.getConstant(VTBits, MVT::i8)); 6224 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 6225 AndNode, DAG.getConstant(0, MVT::i8)); 6226 6227 SDValue Hi, Lo; 6228 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 6229 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 6230 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 6231 6232 if (Op.getOpcode() == ISD::SHL_PARTS) { 6233 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6234 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6235 } else { 6236 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6237 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6238 } 6239 6240 SDValue Ops[2] = { Lo, Hi }; 6241 return DAG.getMergeValues(Ops, 2, dl); 6242} 6243 6244SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 6245 SelectionDAG &DAG) const { 6246 EVT SrcVT = Op.getOperand(0).getValueType(); 6247 6248 if (SrcVT.isVector()) 6249 return SDValue(); 6250 6251 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 6252 "Unknown SINT_TO_FP to lower!"); 6253 6254 // These are really Legal; return the operand so the caller accepts it as 6255 // Legal. 
6256 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 6257 return Op; 6258 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 6259 Subtarget->is64Bit()) { 6260 return Op; 6261 } 6262 6263 DebugLoc dl = Op.getDebugLoc(); 6264 unsigned Size = SrcVT.getSizeInBits()/8; 6265 MachineFunction &MF = DAG.getMachineFunction(); 6266 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 6267 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6268 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6269 StackSlot, 6270 MachinePointerInfo::getFixedStack(SSFI), 6271 false, false, 0); 6272 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 6273} 6274 6275SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 6276 SDValue StackSlot, 6277 SelectionDAG &DAG) const { 6278 // Build the FILD 6279 DebugLoc DL = Op.getDebugLoc(); 6280 SDVTList Tys; 6281 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 6282 if (useSSE) 6283 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 6284 else 6285 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 6286 6287 unsigned ByteSize = SrcVT.getSizeInBits()/8; 6288 6289 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 6290 MachineMemOperand *MMO = 6291 DAG.getMachineFunction() 6292 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6293 MachineMemOperand::MOLoad, ByteSize, ByteSize); 6294 6295 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 6296 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 6297 X86ISD::FILD, DL, 6298 Tys, Ops, array_lengthof(Ops), 6299 SrcVT, MMO); 6300 6301 if (useSSE) { 6302 Chain = Result.getValue(1); 6303 SDValue InFlag = Result.getValue(2); 6304 6305 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 6306 // shouldn't be necessary except that RFP cannot be live across 6307 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
6308 MachineFunction &MF = DAG.getMachineFunction(); 6309 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 6310 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 6311 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6312 Tys = DAG.getVTList(MVT::Other); 6313 SDValue Ops[] = { 6314 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 6315 }; 6316 MachineMemOperand *MMO = 6317 DAG.getMachineFunction() 6318 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6319 MachineMemOperand::MOStore, SSFISize, SSFISize); 6320 6321 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 6322 Ops, array_lengthof(Ops), 6323 Op.getValueType(), MMO); 6324 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 6325 MachinePointerInfo::getFixedStack(SSFI), 6326 false, false, 0); 6327 } 6328 6329 return Result; 6330} 6331 6332// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 6333SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 6334 SelectionDAG &DAG) const { 6335 // This algorithm is not obvious. Here it is in C code, more or less: 6336 /* 6337 double uint64_to_double( uint32_t hi, uint32_t lo ) { 6338 static const __m128i exp = { 0x4330000045300000ULL, 0 }; 6339 static const __m128d bias = { 0x1.0p84, 0x1.0p52 }; 6340 6341 // Copy ints to xmm registers. 6342 __m128i xh = _mm_cvtsi32_si128( hi ); 6343 __m128i xl = _mm_cvtsi32_si128( lo ); 6344 6345 // Combine into low half of a single xmm register. 6346 __m128i x = _mm_unpacklo_epi32( xh, xl ); 6347 __m128d d; 6348 double sd; 6349 6350 // Merge in appropriate exponents to give the integer bits the right 6351 // magnitude. 6352 x = _mm_unpacklo_epi32( x, exp ); 6353 6354 // Subtract away the biases to deal with the IEEE-754 double precision 6355 // implicit 1. 6356 d = _mm_sub_pd( (__m128d) x, bias ); 6357 6358 // All conversions up to here are exact. 
The correctly rounded result is 6359 // calculated using the current rounding mode using the following 6360 // horizontal add. 6361 d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) ); 6362 _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this 6363 // store doesn't really need to be here (except 6364 // maybe to zero the other double) 6365 return sd; 6366 } 6367 */ 6368 6369 DebugLoc dl = Op.getDebugLoc(); 6370 LLVMContext *Context = DAG.getContext(); 6371 6372 // Build some magic constants. 6373 std::vector<Constant*> CV0; 6374 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000))); 6375 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000))); 6376 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6377 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6378 Constant *C0 = ConstantVector::get(CV0); 6379 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 6380 6381 std::vector<Constant*> CV1; 6382 CV1.push_back( 6383 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 6384 CV1.push_back( 6385 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 6386 Constant *C1 = ConstantVector::get(CV1); 6387 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 6388 6389 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6390 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6391 Op.getOperand(0), 6392 DAG.getIntPtrConstant(1))); 6393 SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6394 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6395 Op.getOperand(0), 6396 DAG.getIntPtrConstant(0))); 6397 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2); 6398 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 6399 MachinePointerInfo::getConstantPool(), 6400 false, false, 16); 6401 SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0); 6402 SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2); 6403 SDValue 
CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 6404 MachinePointerInfo::getConstantPool(), 6405 false, false, 16); 6406 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 6407 6408 // Add the halves; easiest way is to swap them into another reg first. 6409 int ShufMask[2] = { 1, -1 }; 6410 SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, 6411 DAG.getUNDEF(MVT::v2f64), ShufMask); 6412 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub); 6413 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add, 6414 DAG.getIntPtrConstant(0)); 6415} 6416 6417// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 6418SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 6419 SelectionDAG &DAG) const { 6420 DebugLoc dl = Op.getDebugLoc(); 6421 // FP constant to bias correct the final result. 6422 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 6423 MVT::f64); 6424 6425 // Load the 32-bit value into an XMM register. 6426 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6427 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6428 Op.getOperand(0), 6429 DAG.getIntPtrConstant(0))); 6430 6431 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6432 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load), 6433 DAG.getIntPtrConstant(0)); 6434 6435 // Or the load with the bias. 6436 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 6437 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6438 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6439 MVT::v2f64, Load)), 6440 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6441 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6442 MVT::v2f64, Bias))); 6443 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6444 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or), 6445 DAG.getIntPtrConstant(0)); 6446 6447 // Subtract the bias. 6448 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 6449 6450 // Handle final rounding. 
6451 EVT DestVT = Op.getValueType(); 6452 6453 if (DestVT.bitsLT(MVT::f64)) { 6454 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 6455 DAG.getIntPtrConstant(0)); 6456 } else if (DestVT.bitsGT(MVT::f64)) { 6457 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 6458 } 6459 6460 // Handle final rounding. 6461 return Sub; 6462} 6463 6464SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 6465 SelectionDAG &DAG) const { 6466 SDValue N0 = Op.getOperand(0); 6467 DebugLoc dl = Op.getDebugLoc(); 6468 6469 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 6470 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 6471 // the optimization here. 6472 if (DAG.SignBitIsZero(N0)) 6473 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 6474 6475 EVT SrcVT = N0.getValueType(); 6476 EVT DstVT = Op.getValueType(); 6477 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 6478 return LowerUINT_TO_FP_i64(Op, DAG); 6479 else if (SrcVT == MVT::i32 && X86ScalarSSEf64) 6480 return LowerUINT_TO_FP_i32(Op, DAG); 6481 6482 // Make a 64-bit buffer, and use it to build an FILD. 
6483 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 6484 if (SrcVT == MVT::i32) { 6485 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 6486 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 6487 getPointerTy(), StackSlot, WordOff); 6488 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6489 StackSlot, MachinePointerInfo(), 6490 false, false, 0); 6491 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 6492 OffsetSlot, MachinePointerInfo(), 6493 false, false, 0); 6494 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 6495 return Fild; 6496 } 6497 6498 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 6499 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6500 StackSlot, MachinePointerInfo(), 6501 false, false, 0); 6502 // For i64 source, we need to add the appropriate power of 2 if the input 6503 // was negative. This is the same as the optimization in 6504 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 6505 // we must be careful to do the computation in x87 extended precision, not 6506 // in SSE. (The generic code can't know it's OK to do this, or how to.) 6507 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 6508 MachineMemOperand *MMO = 6509 DAG.getMachineFunction() 6510 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6511 MachineMemOperand::MOLoad, 8, 8); 6512 6513 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 6514 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 6515 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 6516 MVT::i64, MMO); 6517 6518 APInt FF(32, 0x5F800000ULL); 6519 6520 // Check whether the sign bit is set. 6521 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 6522 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 6523 ISD::SETLT); 6524 6525 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 
6526 SDValue FudgePtr = DAG.getConstantPool( 6527 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 6528 getPointerTy()); 6529 6530 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 6531 SDValue Zero = DAG.getIntPtrConstant(0); 6532 SDValue Four = DAG.getIntPtrConstant(4); 6533 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 6534 Zero, Four); 6535 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 6536 6537 // Load the value out, extending it from f32 to f80. 6538 // FIXME: Avoid the extend by constructing the right constant pool? 6539 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, MVT::f80, dl, DAG.getEntryNode(), 6540 FudgePtr, MachinePointerInfo::getConstantPool(), 6541 MVT::f32, false, false, 4); 6542 // Extend everything to 80 bits to force it to be done on x87. 6543 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 6544 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 6545} 6546 6547std::pair<SDValue,SDValue> X86TargetLowering:: 6548FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const { 6549 DebugLoc DL = Op.getDebugLoc(); 6550 6551 EVT DstTy = Op.getValueType(); 6552 6553 if (!IsSigned) { 6554 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 6555 DstTy = MVT::i64; 6556 } 6557 6558 assert(DstTy.getSimpleVT() <= MVT::i64 && 6559 DstTy.getSimpleVT() >= MVT::i16 && 6560 "Unknown FP_TO_SINT to lower!"); 6561 6562 // These are really Legal. 6563 if (DstTy == MVT::i32 && 6564 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 6565 return std::make_pair(SDValue(), SDValue()); 6566 if (Subtarget->is64Bit() && 6567 DstTy == MVT::i64 && 6568 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 6569 return std::make_pair(SDValue(), SDValue()); 6570 6571 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 6572 // stack slot. 
6573 MachineFunction &MF = DAG.getMachineFunction(); 6574 unsigned MemSize = DstTy.getSizeInBits()/8; 6575 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 6576 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6577 6578 6579 6580 unsigned Opc; 6581 switch (DstTy.getSimpleVT().SimpleTy) { 6582 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 6583 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 6584 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 6585 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 6586 } 6587 6588 SDValue Chain = DAG.getEntryNode(); 6589 SDValue Value = Op.getOperand(0); 6590 EVT TheVT = Op.getOperand(0).getValueType(); 6591 if (isScalarFPTypeInSSEReg(TheVT)) { 6592 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 6593 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 6594 MachinePointerInfo::getFixedStack(SSFI), 6595 false, false, 0); 6596 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 6597 SDValue Ops[] = { 6598 Chain, StackSlot, DAG.getValueType(TheVT) 6599 }; 6600 6601 MachineMemOperand *MMO = 6602 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6603 MachineMemOperand::MOLoad, MemSize, MemSize); 6604 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 6605 DstTy, MMO); 6606 Chain = Value.getValue(1); 6607 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 6608 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6609 } 6610 6611 MachineMemOperand *MMO = 6612 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6613 MachineMemOperand::MOStore, MemSize, MemSize); 6614 6615 // Build the FP_TO_INT*_IN_MEM 6616 SDValue Ops[] = { Chain, Value, StackSlot }; 6617 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 6618 Ops, 3, DstTy, MMO); 6619 6620 return std::make_pair(FIST, StackSlot); 6621} 6622 6623SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 6624 
SelectionDAG &DAG) const { 6625 if (Op.getValueType().isVector()) 6626 return SDValue(); 6627 6628 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); 6629 SDValue FIST = Vals.first, StackSlot = Vals.second; 6630 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 6631 if (FIST.getNode() == 0) return Op; 6632 6633 // Load the result. 6634 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 6635 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 6636} 6637 6638SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 6639 SelectionDAG &DAG) const { 6640 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false); 6641 SDValue FIST = Vals.first, StackSlot = Vals.second; 6642 assert(FIST.getNode() && "Unexpected failure"); 6643 6644 // Load the result. 6645 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 6646 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 6647} 6648 6649SDValue X86TargetLowering::LowerFABS(SDValue Op, 6650 SelectionDAG &DAG) const { 6651 LLVMContext *Context = DAG.getContext(); 6652 DebugLoc dl = Op.getDebugLoc(); 6653 EVT VT = Op.getValueType(); 6654 EVT EltVT = VT; 6655 if (VT.isVector()) 6656 EltVT = VT.getVectorElementType(); 6657 std::vector<Constant*> CV; 6658 if (EltVT == MVT::f64) { 6659 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 6660 CV.push_back(C); 6661 CV.push_back(C); 6662 } else { 6663 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 6664 CV.push_back(C); 6665 CV.push_back(C); 6666 CV.push_back(C); 6667 CV.push_back(C); 6668 } 6669 Constant *C = ConstantVector::get(CV); 6670 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6671 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6672 MachinePointerInfo::getConstantPool(), 6673 false, false, 16); 6674 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 6675} 6676 6677SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG 
&DAG) const { 6678 LLVMContext *Context = DAG.getContext(); 6679 DebugLoc dl = Op.getDebugLoc(); 6680 EVT VT = Op.getValueType(); 6681 EVT EltVT = VT; 6682 if (VT.isVector()) 6683 EltVT = VT.getVectorElementType(); 6684 std::vector<Constant*> CV; 6685 if (EltVT == MVT::f64) { 6686 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 6687 CV.push_back(C); 6688 CV.push_back(C); 6689 } else { 6690 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 6691 CV.push_back(C); 6692 CV.push_back(C); 6693 CV.push_back(C); 6694 CV.push_back(C); 6695 } 6696 Constant *C = ConstantVector::get(CV); 6697 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6698 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6699 MachinePointerInfo::getConstantPool(), 6700 false, false, 16); 6701 if (VT.isVector()) { 6702 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 6703 DAG.getNode(ISD::XOR, dl, MVT::v2i64, 6704 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6705 Op.getOperand(0)), 6706 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask))); 6707 } else { 6708 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 6709 } 6710} 6711 6712SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 6713 LLVMContext *Context = DAG.getContext(); 6714 SDValue Op0 = Op.getOperand(0); 6715 SDValue Op1 = Op.getOperand(1); 6716 DebugLoc dl = Op.getDebugLoc(); 6717 EVT VT = Op.getValueType(); 6718 EVT SrcVT = Op1.getValueType(); 6719 6720 // If second operand is smaller, extend it first. 6721 if (SrcVT.bitsLT(VT)) { 6722 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 6723 SrcVT = VT; 6724 } 6725 // And if it is bigger, shrink it first. 6726 if (SrcVT.bitsGT(VT)) { 6727 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 6728 SrcVT = VT; 6729 } 6730 6731 // At this point the operands and the result should have the same 6732 // type, and that won't be f80 since that is not custom lowered. 
6733 6734 // First get the sign bit of second operand. 6735 std::vector<Constant*> CV; 6736 if (SrcVT == MVT::f64) { 6737 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 6738 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 6739 } else { 6740 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 6741 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6742 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6743 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6744 } 6745 Constant *C = ConstantVector::get(CV); 6746 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6747 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 6748 MachinePointerInfo::getConstantPool(), 6749 false, false, 16); 6750 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 6751 6752 // Shift sign bit right or left if the two operands have different types. 6753 if (SrcVT.bitsGT(VT)) { 6754 // Op0 is MVT::f32, Op1 is MVT::f64. 6755 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 6756 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 6757 DAG.getConstant(32, MVT::i32)); 6758 SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit); 6759 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 6760 DAG.getIntPtrConstant(0)); 6761 } 6762 6763 // Clear first operand sign bit. 
6764 CV.clear(); 6765 if (VT == MVT::f64) { 6766 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 6767 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 6768 } else { 6769 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 6770 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6771 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6772 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6773 } 6774 C = ConstantVector::get(CV); 6775 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6776 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6777 MachinePointerInfo::getConstantPool(), 6778 false, false, 16); 6779 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 6780 6781 // Or the value with the sign bit. 6782 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 6783} 6784 6785/// Emit nodes that will be selected as "test Op0,Op0", or something 6786/// equivalent. 6787SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 6788 SelectionDAG &DAG) const { 6789 DebugLoc dl = Op.getDebugLoc(); 6790 6791 // CF and OF aren't always set the way we want. Determine which 6792 // of these we need. 6793 bool NeedCF = false; 6794 bool NeedOF = false; 6795 switch (X86CC) { 6796 default: break; 6797 case X86::COND_A: case X86::COND_AE: 6798 case X86::COND_B: case X86::COND_BE: 6799 NeedCF = true; 6800 break; 6801 case X86::COND_G: case X86::COND_GE: 6802 case X86::COND_L: case X86::COND_LE: 6803 case X86::COND_O: case X86::COND_NO: 6804 NeedOF = true; 6805 break; 6806 } 6807 6808 // See if we can use the EFLAGS value from the operand instead of 6809 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 6810 // we prove that the arithmetic won't overflow, we can't use OF or CF. 6811 if (Op.getResNo() != 0 || NeedOF || NeedCF) 6812 // Emit a CMP with 0, which is the TEST pattern. 
6813 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 6814 DAG.getConstant(0, Op.getValueType())); 6815 6816 unsigned Opcode = 0; 6817 unsigned NumOperands = 0; 6818 switch (Op.getNode()->getOpcode()) { 6819 case ISD::ADD: 6820 // Due to an isel shortcoming, be conservative if this add is likely to be 6821 // selected as part of a load-modify-store instruction. When the root node 6822 // in a match is a store, isel doesn't know how to remap non-chain non-flag 6823 // uses of other nodes in the match, such as the ADD in this case. This 6824 // leads to the ADD being left around and reselected, with the result being 6825 // two adds in the output. Alas, even if none our users are stores, that 6826 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 6827 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 6828 // climbing the DAG back to the root, and it doesn't seem to be worth the 6829 // effort. 6830 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 6831 UE = Op.getNode()->use_end(); UI != UE; ++UI) 6832 if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC) 6833 goto default_case; 6834 6835 if (ConstantSDNode *C = 6836 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) { 6837 // An add of one will be selected as an INC. 6838 if (C->getAPIntValue() == 1) { 6839 Opcode = X86ISD::INC; 6840 NumOperands = 1; 6841 break; 6842 } 6843 6844 // An add of negative one (subtract of one) will be selected as a DEC. 6845 if (C->getAPIntValue().isAllOnesValue()) { 6846 Opcode = X86ISD::DEC; 6847 NumOperands = 1; 6848 break; 6849 } 6850 } 6851 6852 // Otherwise use a regular EFLAGS-setting add. 6853 Opcode = X86ISD::ADD; 6854 NumOperands = 2; 6855 break; 6856 case ISD::AND: { 6857 // If the primary and result isn't used, don't bother using X86ISD::AND, 6858 // because a TEST instruction will be better. 
6859 bool NonFlagUse = false; 6860 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 6861 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 6862 SDNode *User = *UI; 6863 unsigned UOpNo = UI.getOperandNo(); 6864 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 6865 // Look pass truncate. 6866 UOpNo = User->use_begin().getOperandNo(); 6867 User = *User->use_begin(); 6868 } 6869 6870 if (User->getOpcode() != ISD::BRCOND && 6871 User->getOpcode() != ISD::SETCC && 6872 (User->getOpcode() != ISD::SELECT || UOpNo != 0)) { 6873 NonFlagUse = true; 6874 break; 6875 } 6876 } 6877 6878 if (!NonFlagUse) 6879 break; 6880 } 6881 // FALL THROUGH 6882 case ISD::SUB: 6883 case ISD::OR: 6884 case ISD::XOR: 6885 // Due to the ISEL shortcoming noted above, be conservative if this op is 6886 // likely to be selected as part of a load-modify-store instruction. 6887 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 6888 UE = Op.getNode()->use_end(); UI != UE; ++UI) 6889 if (UI->getOpcode() == ISD::STORE) 6890 goto default_case; 6891 6892 // Otherwise use a regular EFLAGS-setting instruction. 6893 switch (Op.getNode()->getOpcode()) { 6894 default: llvm_unreachable("unexpected operator!"); 6895 case ISD::SUB: Opcode = X86ISD::SUB; break; 6896 case ISD::OR: Opcode = X86ISD::OR; break; 6897 case ISD::XOR: Opcode = X86ISD::XOR; break; 6898 case ISD::AND: Opcode = X86ISD::AND; break; 6899 } 6900 6901 NumOperands = 2; 6902 break; 6903 case X86ISD::ADD: 6904 case X86ISD::SUB: 6905 case X86ISD::INC: 6906 case X86ISD::DEC: 6907 case X86ISD::OR: 6908 case X86ISD::XOR: 6909 case X86ISD::AND: 6910 return SDValue(Op.getNode(), 1); 6911 default: 6912 default_case: 6913 break; 6914 } 6915 6916 if (Opcode == 0) 6917 // Emit a CMP with 0, which is the TEST pattern. 
6918 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 6919 DAG.getConstant(0, Op.getValueType())); 6920 6921 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 6922 SmallVector<SDValue, 4> Ops; 6923 for (unsigned i = 0; i != NumOperands; ++i) 6924 Ops.push_back(Op.getOperand(i)); 6925 6926 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 6927 DAG.ReplaceAllUsesWith(Op, New); 6928 return SDValue(New.getNode(), 1); 6929} 6930 6931/// Emit nodes that will be selected as "cmp Op0,Op1", or something 6932/// equivalent. 6933SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 6934 SelectionDAG &DAG) const { 6935 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 6936 if (C->getAPIntValue() == 0) 6937 return EmitTest(Op0, X86CC, DAG); 6938 6939 DebugLoc dl = Op0.getDebugLoc(); 6940 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 6941} 6942 6943/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 6944/// if it's possible. 6945SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 6946 DebugLoc dl, SelectionDAG &DAG) const { 6947 SDValue Op0 = And.getOperand(0); 6948 SDValue Op1 = And.getOperand(1); 6949 if (Op0.getOpcode() == ISD::TRUNCATE) 6950 Op0 = Op0.getOperand(0); 6951 if (Op1.getOpcode() == ISD::TRUNCATE) 6952 Op1 = Op1.getOperand(0); 6953 6954 SDValue LHS, RHS; 6955 if (Op1.getOpcode() == ISD::SHL) 6956 std::swap(Op0, Op1); 6957 if (Op0.getOpcode() == ISD::SHL) { 6958 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 6959 if (And00C->getZExtValue() == 1) { 6960 // If we looked past a truncate, check that it's only truncating away 6961 // known zeros. 
6962 unsigned BitWidth = Op0.getValueSizeInBits(); 6963 unsigned AndBitWidth = And.getValueSizeInBits(); 6964 if (BitWidth > AndBitWidth) { 6965 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones; 6966 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones); 6967 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 6968 return SDValue(); 6969 } 6970 LHS = Op1; 6971 RHS = Op0.getOperand(1); 6972 } 6973 } else if (Op1.getOpcode() == ISD::Constant) { 6974 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 6975 SDValue AndLHS = Op0; 6976 if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) { 6977 LHS = AndLHS.getOperand(0); 6978 RHS = AndLHS.getOperand(1); 6979 } 6980 } 6981 6982 if (LHS.getNode()) { 6983 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 6984 // instruction. Since the shift amount is in-range-or-undefined, we know 6985 // that doing a bittest on the i32 value is ok. We extend to i32 because 6986 // the encoding for the i16 version is larger than the i32 version. 6987 // Also promote i16 to i32 for performance / code size reason. 6988 if (LHS.getValueType() == MVT::i8 || 6989 LHS.getValueType() == MVT::i16) 6990 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 6991 6992 // If the operand types disagree, extend the shift amount to match. Since 6993 // BT ignores high bits (like shifts) we can use anyextend. 6994 if (LHS.getValueType() != RHS.getValueType()) 6995 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 6996 6997 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 6998 unsigned Cond = CC == ISD::SETEQ ? 
X86::COND_AE : X86::COND_B; 6999 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7000 DAG.getConstant(Cond, MVT::i8), BT); 7001 } 7002 7003 return SDValue(); 7004} 7005 7006SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 7007 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 7008 SDValue Op0 = Op.getOperand(0); 7009 SDValue Op1 = Op.getOperand(1); 7010 DebugLoc dl = Op.getDebugLoc(); 7011 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7012 7013 // Optimize to BT if possible. 7014 // Lower (X & (1 << N)) == 0 to BT(X, N). 7015 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 7016 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 7017 if (Op0.getOpcode() == ISD::AND && 7018 Op0.hasOneUse() && 7019 Op1.getOpcode() == ISD::Constant && 7020 cast<ConstantSDNode>(Op1)->isNullValue() && 7021 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 7022 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 7023 if (NewSetCC.getNode()) 7024 return NewSetCC; 7025 } 7026 7027 // Look for "(setcc) == / != 1" to avoid unncessary setcc. 7028 if (Op0.getOpcode() == X86ISD::SETCC && 7029 Op1.getOpcode() == ISD::Constant && 7030 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 7031 cast<ConstantSDNode>(Op1)->isNullValue()) && 7032 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 7033 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 7034 bool Invert = (CC == ISD::SETNE) ^ 7035 cast<ConstantSDNode>(Op1)->isNullValue(); 7036 if (Invert) 7037 CCode = X86::GetOppositeBranchCondition(CCode); 7038 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7039 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 7040 } 7041 7042 bool isFP = Op1.getValueType().isFloatingPoint(); 7043 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 7044 if (X86CC == X86::COND_INVALID) 7045 return SDValue(); 7046 7047 SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG); 7048 7049 // Use sbb x, x to materialize carry bit into a GPR. 
7050 if (X86CC == X86::COND_B) 7051 return DAG.getNode(ISD::AND, dl, MVT::i8, 7052 DAG.getNode(X86ISD::SETCC_CARRY, dl, MVT::i8, 7053 DAG.getConstant(X86CC, MVT::i8), Cond), 7054 DAG.getConstant(1, MVT::i8)); 7055 7056 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7057 DAG.getConstant(X86CC, MVT::i8), Cond); 7058} 7059 7060SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 7061 SDValue Cond; 7062 SDValue Op0 = Op.getOperand(0); 7063 SDValue Op1 = Op.getOperand(1); 7064 SDValue CC = Op.getOperand(2); 7065 EVT VT = Op.getValueType(); 7066 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 7067 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 7068 DebugLoc dl = Op.getDebugLoc(); 7069 7070 if (isFP) { 7071 unsigned SSECC = 8; 7072 EVT VT0 = Op0.getValueType(); 7073 assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64); 7074 unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD; 7075 bool Swap = false; 7076 7077 switch (SetCCOpcode) { 7078 default: break; 7079 case ISD::SETOEQ: 7080 case ISD::SETEQ: SSECC = 0; break; 7081 case ISD::SETOGT: 7082 case ISD::SETGT: Swap = true; // Fallthrough 7083 case ISD::SETLT: 7084 case ISD::SETOLT: SSECC = 1; break; 7085 case ISD::SETOGE: 7086 case ISD::SETGE: Swap = true; // Fallthrough 7087 case ISD::SETLE: 7088 case ISD::SETOLE: SSECC = 2; break; 7089 case ISD::SETUO: SSECC = 3; break; 7090 case ISD::SETUNE: 7091 case ISD::SETNE: SSECC = 4; break; 7092 case ISD::SETULE: Swap = true; 7093 case ISD::SETUGE: SSECC = 5; break; 7094 case ISD::SETULT: Swap = true; 7095 case ISD::SETUGT: SSECC = 6; break; 7096 case ISD::SETO: SSECC = 7; break; 7097 } 7098 if (Swap) 7099 std::swap(Op0, Op1); 7100 7101 // In the two special cases we can't handle, emit two comparisons. 
7102 if (SSECC == 8) { 7103 if (SetCCOpcode == ISD::SETUEQ) { 7104 SDValue UNORD, EQ; 7105 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8)); 7106 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); 7107 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); 7108 } 7109 else if (SetCCOpcode == ISD::SETONE) { 7110 SDValue ORD, NEQ; 7111 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); 7112 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8)); 7113 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ); 7114 } 7115 llvm_unreachable("Illegal FP comparison"); 7116 } 7117 // Handle all other FP comparisons here. 7118 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8)); 7119 } 7120 7121 // We are handling one of the integer comparisons here. Since SSE only has 7122 // GT and EQ comparisons for integer, swapping operands and multiple 7123 // operations may be required for some comparisons. 7124 unsigned Opc = 0, EQOpc = 0, GTOpc = 0; 7125 bool Swap = false, Invert = false, FlipSigns = false; 7126 7127 switch (VT.getSimpleVT().SimpleTy) { 7128 default: break; 7129 case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break; 7130 case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break; 7131 case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break; 7132 case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break; 7133 } 7134 7135 switch (SetCCOpcode) { 7136 default: break; 7137 case ISD::SETNE: Invert = true; 7138 case ISD::SETEQ: Opc = EQOpc; break; 7139 case ISD::SETLT: Swap = true; 7140 case ISD::SETGT: Opc = GTOpc; break; 7141 case ISD::SETGE: Swap = true; 7142 case ISD::SETLE: Opc = GTOpc; Invert = true; break; 7143 case ISD::SETULT: Swap = true; 7144 case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break; 7145 case ISD::SETUGE: Swap = true; 7146 case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break; 7147 } 
7148 if (Swap) 7149 std::swap(Op0, Op1); 7150 7151 // Since SSE has no unsigned integer comparisons, we need to flip the sign 7152 // bits of the inputs before performing those operations. 7153 if (FlipSigns) { 7154 EVT EltVT = VT.getVectorElementType(); 7155 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 7156 EltVT); 7157 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 7158 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 7159 SignBits.size()); 7160 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 7161 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 7162 } 7163 7164 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 7165 7166 // If the logical-not of the result is required, perform that now. 7167 if (Invert) 7168 Result = DAG.getNOT(dl, Result, VT); 7169 7170 return Result; 7171} 7172 7173// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 7174static bool isX86LogicalCmp(SDValue Op) { 7175 unsigned Opc = Op.getNode()->getOpcode(); 7176 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) 7177 return true; 7178 if (Op.getResNo() == 1 && 7179 (Opc == X86ISD::ADD || 7180 Opc == X86ISD::SUB || 7181 Opc == X86ISD::SMUL || 7182 Opc == X86ISD::UMUL || 7183 Opc == X86ISD::INC || 7184 Opc == X86ISD::DEC || 7185 Opc == X86ISD::OR || 7186 Opc == X86ISD::XOR || 7187 Opc == X86ISD::AND)) 7188 return true; 7189 7190 return false; 7191} 7192 7193SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 7194 bool addTest = true; 7195 SDValue Cond = Op.getOperand(0); 7196 DebugLoc dl = Op.getDebugLoc(); 7197 SDValue CC; 7198 7199 if (Cond.getOpcode() == ISD::SETCC) { 7200 SDValue NewCond = LowerSETCC(Cond, DAG); 7201 if (NewCond.getNode()) 7202 Cond = NewCond; 7203 } 7204 7205 // (select (x == 0), -1, 0) -> (sign_bit (x - 1)) 7206 SDValue Op1 = Op.getOperand(1); 7207 SDValue Op2 = Op.getOperand(2); 7208 if (Cond.getOpcode() == X86ISD::SETCC && 7209 
cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue() == X86::COND_E) { 7210 SDValue Cmp = Cond.getOperand(1); 7211 if (Cmp.getOpcode() == X86ISD::CMP) { 7212 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op1); 7213 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 7214 ConstantSDNode *RHSC = 7215 dyn_cast<ConstantSDNode>(Cmp.getOperand(1).getNode()); 7216 if (N1C && N1C->isAllOnesValue() && 7217 N2C && N2C->isNullValue() && 7218 RHSC && RHSC->isNullValue()) { 7219 SDValue CmpOp0 = Cmp.getOperand(0); 7220 Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7221 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 7222 return DAG.getNode(X86ISD::SETCC_CARRY, dl, Op.getValueType(), 7223 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 7224 } 7225 } 7226 } 7227 7228 // Look pass (and (setcc_carry (cmp ...)), 1). 7229 if (Cond.getOpcode() == ISD::AND && 7230 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 7231 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 7232 if (C && C->getAPIntValue() == 1) 7233 Cond = Cond.getOperand(0); 7234 } 7235 7236 // If condition flag is set by a X86ISD::CMP, then use it as the condition 7237 // setting operand in place of the X86ISD::SETCC. 7238 if (Cond.getOpcode() == X86ISD::SETCC || 7239 Cond.getOpcode() == X86ISD::SETCC_CARRY) { 7240 CC = Cond.getOperand(0); 7241 7242 SDValue Cmp = Cond.getOperand(1); 7243 unsigned Opc = Cmp.getOpcode(); 7244 EVT VT = Op.getValueType(); 7245 7246 bool IllegalFPCMov = false; 7247 if (VT.isFloatingPoint() && !VT.isVector() && 7248 !isScalarFPTypeInSSEReg(VT)) // FPStack? 7249 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 7250 7251 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 7252 Opc == X86ISD::BT) { // FIXME 7253 Cond = Cmp; 7254 addTest = false; 7255 } 7256 } 7257 7258 if (addTest) { 7259 // Look pass the truncate. 7260 if (Cond.getOpcode() == ISD::TRUNCATE) 7261 Cond = Cond.getOperand(0); 7262 7263 // We know the result of AND is compared against zero. 
Try to match 7264 // it to BT. 7265 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 7266 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 7267 if (NewSetCC.getNode()) { 7268 CC = NewSetCC.getOperand(0); 7269 Cond = NewSetCC.getOperand(1); 7270 addTest = false; 7271 } 7272 } 7273 } 7274 7275 if (addTest) { 7276 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7277 Cond = EmitTest(Cond, X86::COND_NE, DAG); 7278 } 7279 7280 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 7281 // condition is true. 7282 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag); 7283 SDValue Ops[] = { Op2, Op1, CC, Cond }; 7284 return DAG.getNode(X86ISD::CMOV, dl, VTs, Ops, array_lengthof(Ops)); 7285} 7286 7287// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 7288// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 7289// from the AND / OR. 7290static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 7291 Opc = Op.getOpcode(); 7292 if (Opc != ISD::OR && Opc != ISD::AND) 7293 return false; 7294 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 7295 Op.getOperand(0).hasOneUse() && 7296 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 7297 Op.getOperand(1).hasOneUse()); 7298} 7299 7300// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 7301// 1 and that the SETCC node has a single use. 
7302static bool isXor1OfSetCC(SDValue Op) { 7303 if (Op.getOpcode() != ISD::XOR) 7304 return false; 7305 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 7306 if (N1C && N1C->getAPIntValue() == 1) { 7307 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 7308 Op.getOperand(0).hasOneUse(); 7309 } 7310 return false; 7311} 7312 7313SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 7314 bool addTest = true; 7315 SDValue Chain = Op.getOperand(0); 7316 SDValue Cond = Op.getOperand(1); 7317 SDValue Dest = Op.getOperand(2); 7318 DebugLoc dl = Op.getDebugLoc(); 7319 SDValue CC; 7320 7321 if (Cond.getOpcode() == ISD::SETCC) { 7322 SDValue NewCond = LowerSETCC(Cond, DAG); 7323 if (NewCond.getNode()) 7324 Cond = NewCond; 7325 } 7326#if 0 7327 // FIXME: LowerXALUO doesn't handle these!! 7328 else if (Cond.getOpcode() == X86ISD::ADD || 7329 Cond.getOpcode() == X86ISD::SUB || 7330 Cond.getOpcode() == X86ISD::SMUL || 7331 Cond.getOpcode() == X86ISD::UMUL) 7332 Cond = LowerXALUO(Cond, DAG); 7333#endif 7334 7335 // Look pass (and (setcc_carry (cmp ...)), 1). 7336 if (Cond.getOpcode() == ISD::AND && 7337 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 7338 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 7339 if (C && C->getAPIntValue() == 1) 7340 Cond = Cond.getOperand(0); 7341 } 7342 7343 // If condition flag is set by a X86ISD::CMP, then use it as the condition 7344 // setting operand in place of the X86ISD::SETCC. 7345 if (Cond.getOpcode() == X86ISD::SETCC || 7346 Cond.getOpcode() == X86ISD::SETCC_CARRY) { 7347 CC = Cond.getOperand(0); 7348 7349 SDValue Cmp = Cond.getOperand(1); 7350 unsigned Opc = Cmp.getOpcode(); 7351 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 
7352 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 7353 Cond = Cmp; 7354 addTest = false; 7355 } else { 7356 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 7357 default: break; 7358 case X86::COND_O: 7359 case X86::COND_B: 7360 // These can only come from an arithmetic instruction with overflow, 7361 // e.g. SADDO, UADDO. 7362 Cond = Cond.getNode()->getOperand(1); 7363 addTest = false; 7364 break; 7365 } 7366 } 7367 } else { 7368 unsigned CondOpc; 7369 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 7370 SDValue Cmp = Cond.getOperand(0).getOperand(1); 7371 if (CondOpc == ISD::OR) { 7372 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 7373 // two branches instead of an explicit OR instruction with a 7374 // separate test. 7375 if (Cmp == Cond.getOperand(1).getOperand(1) && 7376 isX86LogicalCmp(Cmp)) { 7377 CC = Cond.getOperand(0).getOperand(0); 7378 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7379 Chain, Dest, CC, Cmp); 7380 CC = Cond.getOperand(1).getOperand(0); 7381 Cond = Cmp; 7382 addTest = false; 7383 } 7384 } else { // ISD::AND 7385 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 7386 // two branches instead of an explicit AND instruction with a 7387 // separate test. However, we only do this if this block doesn't 7388 // have a fall-through edge, because this requires an explicit 7389 // jmp when the condition is false. 7390 if (Cmp == Cond.getOperand(1).getOperand(1) && 7391 isX86LogicalCmp(Cmp) && 7392 Op.getNode()->hasOneUse()) { 7393 X86::CondCode CCode = 7394 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 7395 CCode = X86::GetOppositeBranchCondition(CCode); 7396 CC = DAG.getConstant(CCode, MVT::i8); 7397 SDNode *User = *Op.getNode()->use_begin(); 7398 // Look for an unconditional branch following this conditional branch. 7399 // We need this because we need to reverse the successors in order 7400 // to implement FCMP_OEQ. 
7401 if (User->getOpcode() == ISD::BR) { 7402 SDValue FalseBB = User->getOperand(1); 7403 SDNode *NewBR = 7404 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 7405 assert(NewBR == User); 7406 (void)NewBR; 7407 Dest = FalseBB; 7408 7409 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7410 Chain, Dest, CC, Cmp); 7411 X86::CondCode CCode = 7412 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 7413 CCode = X86::GetOppositeBranchCondition(CCode); 7414 CC = DAG.getConstant(CCode, MVT::i8); 7415 Cond = Cmp; 7416 addTest = false; 7417 } 7418 } 7419 } 7420 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 7421 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 7422 // It should be transformed during dag combiner except when the condition 7423 // is set by a arithmetics with overflow node. 7424 X86::CondCode CCode = 7425 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 7426 CCode = X86::GetOppositeBranchCondition(CCode); 7427 CC = DAG.getConstant(CCode, MVT::i8); 7428 Cond = Cond.getOperand(0).getOperand(1); 7429 addTest = false; 7430 } 7431 } 7432 7433 if (addTest) { 7434 // Look pass the truncate. 7435 if (Cond.getOpcode() == ISD::TRUNCATE) 7436 Cond = Cond.getOperand(0); 7437 7438 // We know the result of AND is compared against zero. Try to match 7439 // it to BT. 7440 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 7441 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 7442 if (NewSetCC.getNode()) { 7443 CC = NewSetCC.getOperand(0); 7444 Cond = NewSetCC.getOperand(1); 7445 addTest = false; 7446 } 7447 } 7448 } 7449 7450 if (addTest) { 7451 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7452 Cond = EmitTest(Cond, X86::COND_NE, DAG); 7453 } 7454 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7455 Chain, Dest, CC, Cond); 7456} 7457 7458 7459// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 
7460// Calls to _alloca is needed to probe the stack when allocating more than 4k 7461// bytes in one go. Touching the stack at 4K increments is necessary to ensure 7462// that the guard pages used by the OS virtual memory manager are allocated in 7463// correct sequence. 7464SDValue 7465X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 7466 SelectionDAG &DAG) const { 7467 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) && 7468 "This should be used only on Windows targets"); 7469 DebugLoc dl = Op.getDebugLoc(); 7470 7471 // Get the inputs. 7472 SDValue Chain = Op.getOperand(0); 7473 SDValue Size = Op.getOperand(1); 7474 // FIXME: Ensure alignment here 7475 7476 SDValue Flag; 7477 7478 EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 7479 7480 Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag); 7481 Flag = Chain.getValue(1); 7482 7483 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 7484 7485 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 7486 Flag = Chain.getValue(1); 7487 7488 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); 7489 7490 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 7491 return DAG.getMergeValues(Ops1, 2, dl); 7492} 7493 7494SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 7495 MachineFunction &MF = DAG.getMachineFunction(); 7496 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 7497 7498 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 7499 DebugLoc DL = Op.getDebugLoc(); 7500 7501 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 7502 // vastart just stores the address of the VarArgsFrameIndex slot into the 7503 // memory location argument. 
7504 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 7505 getPointerTy()); 7506 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 7507 MachinePointerInfo(SV), false, false, 0); 7508 } 7509 7510 // __va_list_tag: 7511 // gp_offset (0 - 6 * 8) 7512 // fp_offset (48 - 48 + 8 * 16) 7513 // overflow_arg_area (point to parameters coming in memory). 7514 // reg_save_area 7515 SmallVector<SDValue, 8> MemOps; 7516 SDValue FIN = Op.getOperand(1); 7517 // Store gp_offset 7518 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 7519 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 7520 MVT::i32), 7521 FIN, MachinePointerInfo(SV), false, false, 0); 7522 MemOps.push_back(Store); 7523 7524 // Store fp_offset 7525 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7526 FIN, DAG.getIntPtrConstant(4)); 7527 Store = DAG.getStore(Op.getOperand(0), DL, 7528 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 7529 MVT::i32), 7530 FIN, MachinePointerInfo(SV, 4), false, false, 0); 7531 MemOps.push_back(Store); 7532 7533 // Store ptr to overflow_arg_area 7534 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7535 FIN, DAG.getIntPtrConstant(4)); 7536 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 7537 getPointerTy()); 7538 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 7539 MachinePointerInfo(SV, 8), 7540 false, false, 0); 7541 MemOps.push_back(Store); 7542 7543 // Store ptr to reg_save_area. 
7544 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7545 FIN, DAG.getIntPtrConstant(8)); 7546 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 7547 getPointerTy()); 7548 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 7549 MachinePointerInfo(SV, 16), false, false, 0); 7550 MemOps.push_back(Store); 7551 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 7552 &MemOps[0], MemOps.size()); 7553} 7554 7555SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 7556 assert(Subtarget->is64Bit() && 7557 "LowerVAARG only handles 64-bit va_arg!"); 7558 assert((Subtarget->isTargetLinux() || 7559 Subtarget->isTargetDarwin()) && 7560 "Unhandled target in LowerVAARG"); 7561 assert(Op.getNode()->getNumOperands() == 4); 7562 SDValue Chain = Op.getOperand(0); 7563 SDValue SrcPtr = Op.getOperand(1); 7564 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 7565 unsigned Align = Op.getConstantOperandVal(3); 7566 DebugLoc dl = Op.getDebugLoc(); 7567 7568 EVT ArgVT = Op.getNode()->getValueType(0); 7569 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 7570 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 7571 uint8_t ArgMode; 7572 7573 // Decide which area this value should be read from. 7574 // TODO: Implement the AMD64 ABI in its entirety. This simple 7575 // selection mechanism works only for the basic types. 7576 if (ArgVT == MVT::f80) { 7577 llvm_unreachable("va_arg for f80 not yet implemented"); 7578 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 7579 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 7580 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 7581 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 7582 } else { 7583 llvm_unreachable("Unhandled argument type in LowerVAARG"); 7584 } 7585 7586 if (ArgMode == 2) { 7587 // Sanity Check: Make sure using fp_offset makes sense. 
7588 assert(!UseSoftFloat && 7589 !(DAG.getMachineFunction() 7590 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 7591 Subtarget->hasSSE1()); 7592 } 7593 7594 // Insert VAARG_64 node into the DAG 7595 // VAARG_64 returns two values: Variable Argument Address, Chain 7596 SmallVector<SDValue, 11> InstOps; 7597 InstOps.push_back(Chain); 7598 InstOps.push_back(SrcPtr); 7599 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 7600 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 7601 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 7602 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 7603 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 7604 VTs, &InstOps[0], InstOps.size(), 7605 MVT::i64, 7606 MachinePointerInfo(SV), 7607 /*Align=*/0, 7608 /*Volatile=*/false, 7609 /*ReadMem=*/true, 7610 /*WriteMem=*/true); 7611 Chain = VAARG.getValue(1); 7612 7613 // Load the next argument and return it 7614 return DAG.getLoad(ArgVT, dl, 7615 Chain, 7616 VAARG, 7617 MachinePointerInfo(), 7618 false, false, 0); 7619} 7620 7621SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 7622 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
7623 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 7624 SDValue Chain = Op.getOperand(0); 7625 SDValue DstPtr = Op.getOperand(1); 7626 SDValue SrcPtr = Op.getOperand(2); 7627 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 7628 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 7629 DebugLoc DL = Op.getDebugLoc(); 7630 7631 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 7632 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 7633 false, 7634 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 7635} 7636 7637SDValue 7638X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 7639 DebugLoc dl = Op.getDebugLoc(); 7640 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7641 switch (IntNo) { 7642 default: return SDValue(); // Don't custom lower most intrinsics. 7643 // Comparison intrinsics. 7644 case Intrinsic::x86_sse_comieq_ss: 7645 case Intrinsic::x86_sse_comilt_ss: 7646 case Intrinsic::x86_sse_comile_ss: 7647 case Intrinsic::x86_sse_comigt_ss: 7648 case Intrinsic::x86_sse_comige_ss: 7649 case Intrinsic::x86_sse_comineq_ss: 7650 case Intrinsic::x86_sse_ucomieq_ss: 7651 case Intrinsic::x86_sse_ucomilt_ss: 7652 case Intrinsic::x86_sse_ucomile_ss: 7653 case Intrinsic::x86_sse_ucomigt_ss: 7654 case Intrinsic::x86_sse_ucomige_ss: 7655 case Intrinsic::x86_sse_ucomineq_ss: 7656 case Intrinsic::x86_sse2_comieq_sd: 7657 case Intrinsic::x86_sse2_comilt_sd: 7658 case Intrinsic::x86_sse2_comile_sd: 7659 case Intrinsic::x86_sse2_comigt_sd: 7660 case Intrinsic::x86_sse2_comige_sd: 7661 case Intrinsic::x86_sse2_comineq_sd: 7662 case Intrinsic::x86_sse2_ucomieq_sd: 7663 case Intrinsic::x86_sse2_ucomilt_sd: 7664 case Intrinsic::x86_sse2_ucomile_sd: 7665 case Intrinsic::x86_sse2_ucomigt_sd: 7666 case Intrinsic::x86_sse2_ucomige_sd: 7667 case Intrinsic::x86_sse2_ucomineq_sd: { 7668 unsigned Opc = 0; 7669 ISD::CondCode CC = ISD::SETCC_INVALID; 7670 switch 
(IntNo) {
    default: break;
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    // NOTE(review): the 'true' argument presumably selects the FP-comparison
    // translation in TranslateX86CC — confirm against its definition.
    unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
    assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
    SDValue Cond = DAG.getNode(Opc, dl,
MVT::i32, LHS, RHS);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8), Cond);
    // The intrinsics return i32, so widen the i8 setcc result.
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  // ptest and testp intrinsics. The intrinsic these come from are designed to
  // return an integer value, not just an instruction so lower it to the ptest
  // or testp pattern and a setcc for the result.
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    // IsTestPacked selects TESTP (packed FP variants) over PTEST below.
    bool IsTestPacked = false;
    unsigned X86CC = 0;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    // Widen the i8 setcc result to the i32 the intrinsic returns.
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  // Fix vector shift instructions where the last operand is a non-immediate
  // i32 value.
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDValue ShAmt = Op.getOperand(2);
    // Constant shift amounts are left for the normal immediate-form patterns.
    if (isa<ConstantSDNode>(ShAmt))
      return SDValue();

    // Remap the immediate-form intrinsic to its register-form counterpart.
    unsigned NewIntNo = 0;
    EVT ShAmtVT = MVT::v4i32;
    switch (IntNo) {
    case Intrinsic::x86_sse2_pslli_w:
      NewIntNo = Intrinsic::x86_sse2_psll_w;
      break;
    case Intrinsic::x86_sse2_pslli_d:
      NewIntNo = Intrinsic::x86_sse2_psll_d;
      break;
    case Intrinsic::x86_sse2_pslli_q:
      NewIntNo = Intrinsic::x86_sse2_psll_q;
      break;
    case
Intrinsic::x86_sse2_psrli_w:
      NewIntNo = Intrinsic::x86_sse2_psrl_w;
      break;
    case Intrinsic::x86_sse2_psrli_d:
      NewIntNo = Intrinsic::x86_sse2_psrl_d;
      break;
    case Intrinsic::x86_sse2_psrli_q:
      NewIntNo = Intrinsic::x86_sse2_psrl_q;
      break;
    case Intrinsic::x86_sse2_psrai_w:
      NewIntNo = Intrinsic::x86_sse2_psra_w;
      break;
    case Intrinsic::x86_sse2_psrai_d:
      NewIntNo = Intrinsic::x86_sse2_psra_d;
      break;
    default: {
      // The remaining cases are the MMX variants, whose register-form shift
      // amount is a v2i32.
      ShAmtVT = MVT::v2i32;
      switch (IntNo) {
      case Intrinsic::x86_mmx_pslli_w:
        NewIntNo = Intrinsic::x86_mmx_psll_w;
        break;
      case Intrinsic::x86_mmx_pslli_d:
        NewIntNo = Intrinsic::x86_mmx_psll_d;
        break;
      case Intrinsic::x86_mmx_pslli_q:
        NewIntNo = Intrinsic::x86_mmx_psll_q;
        break;
      case Intrinsic::x86_mmx_psrli_w:
        NewIntNo = Intrinsic::x86_mmx_psrl_w;
        break;
      case Intrinsic::x86_mmx_psrli_d:
        NewIntNo = Intrinsic::x86_mmx_psrl_d;
        break;
      case Intrinsic::x86_mmx_psrli_q:
        NewIntNo = Intrinsic::x86_mmx_psrl_q;
        break;
      case Intrinsic::x86_mmx_psrai_w:
        NewIntNo = Intrinsic::x86_mmx_psra_w;
        break;
      case Intrinsic::x86_mmx_psrai_d:
        NewIntNo = Intrinsic::x86_mmx_psra_d;
        break;
      default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
      }
      break;
    }
    }

    // The vector shift intrinsics with scalars uses 32b shift amounts but
    // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
    // to be zero.
    SDValue ShOps[4];
    ShOps[0] = ShAmt;
    ShOps[1] = DAG.getConstant(0, MVT::i32);
    if (ShAmtVT == MVT::v4i32) {
      ShOps[2] = DAG.getUNDEF(MVT::i32);
      ShOps[3] = DAG.getUNDEF(MVT::i32);
      ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
    } else {
      ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
      // FIXME this must be lowered to get rid of the invalid type.
    }

    EVT VT = Op.getValueType();
    // Bitcast the widened shift amount to the operand's vector type and emit
    // the register-form shift intrinsic.
    ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(NewIntNo, MVT::i32),
                       Op.getOperand(1), ShAmt);
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();

  if (Depth > 0) {
    // For outer frames, load the return address from one pointer above the
    // corresponding frame address.
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(TD->getPointerSize(),
                      Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, 0);
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, 0);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = Subtarget->is64Bit() ?
X86::RBP : X86::EBP;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  // Each depth level is one load through the saved-frame-pointer chain.
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, 0);
  return FrameAddr;
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  // Incoming arguments sit two pointers above the frame pointer (past the
  // saved frame pointer and the return address).
  return DAG.getIntPtrConstant(2*TD->getPointerSize());
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     Subtarget->is64Bit() ? X86::RBP : X86::EBP,
                                     getPointerTy());
  unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);

  // Store the handler over the return-address slot: frame pointer + one
  // pointer + the requested offset.
  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
                                  DAG.getIntPtrConstant(TD->getPointerSize()));
  StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
  MF.getRegInfo().addLiveOut(StoreAddrReg);

  return DAG.getNode(X86ISD::EH_RETURN, dl,
                     MVT::Other,
                     Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
}

SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  DebugLoc dl = Op.getDebugLoc();

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  if (Subtarget->is64Bit()) {
    SDValue
OutChains[6];

    // Large code-model.
    const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10);
    const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11);

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    // The i16 store below lays down the two bytes REX_WB then opcode in
    // little-endian order.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    // The 8-byte immediate (the nested function address) at offset 2.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);

    // The 8-byte 'nest' immediate at offset 12.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

    // ModRM byte selecting the register-indirect jmp through r11.
    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    SDValue Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) };
    return DAG.getMergeValues(Ops, 2, dl);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      const FunctionType *FTy = Func->getFunctionType();
      const AttrListPtr &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.paramHasAttr(Idx, Attribute::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            // Count 32-bit register slots consumed by this parameter.
            InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    // Disp is the jmp displacement: target minus the end of the 5-byte jmp
    // instruction that starts at offset 5 (so end = offset 10).
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, dl,
                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    // The 32-bit 'nest' immediate at offset 1.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    // The jmp opcode byte at offset 5.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

    // The 32-bit jmp displacement at offset 6.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    SDValue Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) };
    return DAG.getMergeValues(Ops, 2, dl);
  }
}

SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameInfo &TFI = *TM.getFrameInfo();
  unsigned StackAlignment = TFI.getStackAlignment();
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, 2, 2);

  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
                                          Ops, 2, MVT::i16, MMO);

  // Load FP Control Word from stack slot
  // Load FP Control Word from stack slot
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
                            MachinePointerInfo(), false, false, 0);

  // Transform as necessary (see the conversion formula in the header
  // comment above): extract bits 11 and 10 of the control word.
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}

SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  // (2*NumBits-1 xor'd with NumBits-1 below yields NumBits.)
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));

  // Finally xor with NumBits-1.
  // bsr returns the bit index of the highest set bit; xor with NumBits-1
  // converts that into a leading-zero count.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsf.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
  DebugLoc dl = Op.getDebugLoc();

  // Decompose the 64x64 multiply into 32x32 pmuludq partial products:
  //  ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
  //  ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32);
  //  ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b );
  //  ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi );
  //  ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b );
  //
  //  AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 );
  //  AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 );
  //  return AloBlo + AloBhi + AhiBlo;

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                            DAG.getConstant(Intrinsic::x86_sse2_psrli_q,
MVT::i32),
                            A, DAG.getConstant(32, MVT::i32));
  SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                            DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
                            B, DAG.getConstant(32, MVT::i32));
  SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                               DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
                               A, B);
  SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                               DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
                               A, Bhi);
  SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                               DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
                               Ahi, B);
  // Shift the cross partial products back into position before summing.
  AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
                       AloBhi, DAG.getConstant(32, MVT::i32));
  AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
                       AhiBlo, DAG.getConstant(32, MVT::i32));
  SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
  Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
  return Res;
}

SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue R = Op.getOperand(0);

  LLVMContext *Context = DAG.getContext();

  assert(Subtarget->hasSSE41() && "Cannot lower SHL without SSE4.1 or later");

  if (VT == MVT::v4i32) {
    // Build 2^amt per lane by placing the shift amount into the float
    // exponent field (shift left by 23, add the bias 0x3f800000), then
    // multiply.
    Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                     DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
                     Op.getOperand(1), DAG.getConstant(23, MVT::i32));

    ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));

    std::vector<Constant*> CV(4, CI);
    Constant *C = ConstantVector::get(CV);
    SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
    SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                                 MachinePointerInfo::getConstantPool(),
                                 false, false, 16);

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
    Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
  if (VT == MVT::v16i8) {
    // Shift each byte by examining the shift amount one bit at a time,
    // using pblendvb to select shifted vs. unshifted lanes.
    // a = a << 5;
    Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                     DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
                     Op.getOperand(1), DAG.getConstant(5, MVT::i32));

    ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15));
    ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63));

    std::vector<Constant*> CVM1(16, CM1);
    std::vector<Constant*> CVM2(16, CM2);
    Constant *C = ConstantVector::get(CVM1);
    SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
    SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                            MachinePointerInfo::getConstantPool(),
                            false, false, 16);

    // r = pblendv(r, psllw(r & (char16)15, 4), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, M);
    M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                    DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
                    DAG.getConstant(4, MVT::i32));
    R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                    DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
                    R, M, Op);
    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);

    C = ConstantVector::get(CVM2);
    CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
    M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                    MachinePointerInfo::getConstantPool(),
                    false, false, 16);

    // r = pblendv(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, M);
    M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                    DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
                    DAG.getConstant(2, MVT::i32));
    R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                    DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
                    R, M,
Op); 8387 // a += a 8388 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 8389 8390 // return pblendv(r, r+r, a); 8391 R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8392 DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), 8393 R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op); 8394 return R; 8395 } 8396 return SDValue(); 8397} 8398 8399SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 8400 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 8401 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 8402 // looks for this combo and may remove the "setcc" instruction if the "setcc" 8403 // has only one use. 8404 SDNode *N = Op.getNode(); 8405 SDValue LHS = N->getOperand(0); 8406 SDValue RHS = N->getOperand(1); 8407 unsigned BaseOp = 0; 8408 unsigned Cond = 0; 8409 DebugLoc dl = Op.getDebugLoc(); 8410 8411 switch (Op.getOpcode()) { 8412 default: llvm_unreachable("Unknown ovf instruction!"); 8413 case ISD::SADDO: 8414 // A subtract of one will be selected as a INC. Note that INC doesn't 8415 // set CF, so we can't do this for UADDO. 8416 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 8417 if (C->getAPIntValue() == 1) { 8418 BaseOp = X86ISD::INC; 8419 Cond = X86::COND_O; 8420 break; 8421 } 8422 BaseOp = X86ISD::ADD; 8423 Cond = X86::COND_O; 8424 break; 8425 case ISD::UADDO: 8426 BaseOp = X86ISD::ADD; 8427 Cond = X86::COND_B; 8428 break; 8429 case ISD::SSUBO: 8430 // A subtract of one will be selected as a DEC. Note that DEC doesn't 8431 // set CF, so we can't do this for USUBO. 
8432 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 8433 if (C->getAPIntValue() == 1) { 8434 BaseOp = X86ISD::DEC; 8435 Cond = X86::COND_O; 8436 break; 8437 } 8438 BaseOp = X86ISD::SUB; 8439 Cond = X86::COND_O; 8440 break; 8441 case ISD::USUBO: 8442 BaseOp = X86ISD::SUB; 8443 Cond = X86::COND_B; 8444 break; 8445 case ISD::SMULO: 8446 BaseOp = X86ISD::SMUL; 8447 Cond = X86::COND_O; 8448 break; 8449 case ISD::UMULO: 8450 BaseOp = X86ISD::UMUL; 8451 Cond = X86::COND_B; 8452 break; 8453 } 8454 8455 // Also sets EFLAGS. 8456 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 8457 SDValue Sum = DAG.getNode(BaseOp, dl, VTs, LHS, RHS); 8458 8459 SDValue SetCC = 8460 DAG.getNode(X86ISD::SETCC, dl, N->getValueType(1), 8461 DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1)); 8462 8463 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC); 8464 return Sum; 8465} 8466 8467SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ 8468 DebugLoc dl = Op.getDebugLoc(); 8469 8470 if (!Subtarget->hasSSE2()) { 8471 SDValue Chain = Op.getOperand(0); 8472 SDValue Zero = DAG.getConstant(0, 8473 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 8474 SDValue Ops[] = { 8475 DAG.getRegister(X86::ESP, MVT::i32), // Base 8476 DAG.getTargetConstant(1, MVT::i8), // Scale 8477 DAG.getRegister(0, MVT::i32), // Index 8478 DAG.getTargetConstant(0, MVT::i32), // Disp 8479 DAG.getRegister(0, MVT::i32), // Segment. 
8480 Zero, 8481 Chain 8482 }; 8483 SDNode *Res = 8484 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 8485 array_lengthof(Ops)); 8486 return SDValue(Res, 0); 8487 } 8488 8489 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 8490 if (!isDev) 8491 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 8492 8493 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 8494 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 8495 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 8496 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 8497 8498 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 8499 if (!Op1 && !Op2 && !Op3 && Op4) 8500 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 8501 8502 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 8503 if (Op1 && !Op2 && !Op3 && !Op4) 8504 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 8505 8506 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 8507 // (MFENCE)>; 8508 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 8509} 8510 8511SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 8512 EVT T = Op.getValueType(); 8513 DebugLoc DL = Op.getDebugLoc(); 8514 unsigned Reg = 0; 8515 unsigned size = 0; 8516 switch(T.getSimpleVT().SimpleTy) { 8517 default: 8518 assert(false && "Invalid value type!"); 8519 case MVT::i8: Reg = X86::AL; size = 1; break; 8520 case MVT::i16: Reg = X86::AX; size = 2; break; 8521 case MVT::i32: Reg = X86::EAX; size = 4; break; 8522 case MVT::i64: 8523 assert(Subtarget->is64Bit() && "Node not type legal!"); 8524 Reg = X86::RAX; size = 8; 8525 break; 8526 } 8527 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 8528 Op.getOperand(2), SDValue()); 8529 SDValue Ops[] = { cpIn.getValue(0), 8530 
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, 5, T, MMO);
  // The previous value lands in the accumulator register; copy it out.
  SDValue cpOut =
    DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  return cpOut;
}

SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                 SelectionDAG &DAG) const {
  assert(Subtarget->is64Bit() && "Result not type legalized?");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue TheChain = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
  SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
  SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
                                   rax.getValue(2));
  // Combine RDX:RAX into a single i64: (rdx << 32) | rax.
  SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
                            DAG.getConstant(32, MVT::i8));
  SDValue Ops[] = {
    DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
    rdx.getValue(1)
  };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT SrcVT = Op.getOperand(0).getValueType();
  EVT DstVT = Op.getValueType();
  // Only reachable for 64-bit MMX-but-no-SSE2 targets (per the asserts).
  assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && !DisableMMX) &&
         "Unexpected custom BIT_CONVERT");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
         "Unexpected custom BIT_CONVERT");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT==MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT==MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
8578 if (SrcVT.isVector() && DstVT.isVector()) 8579 return Op; 8580 // All other conversions need to be expanded. 8581 return SDValue(); 8582} 8583SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 8584 SDNode *Node = Op.getNode(); 8585 DebugLoc dl = Node->getDebugLoc(); 8586 EVT T = Node->getValueType(0); 8587 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 8588 DAG.getConstant(0, T), Node->getOperand(2)); 8589 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 8590 cast<AtomicSDNode>(Node)->getMemoryVT(), 8591 Node->getOperand(0), 8592 Node->getOperand(1), negOp, 8593 cast<AtomicSDNode>(Node)->getSrcValue(), 8594 cast<AtomicSDNode>(Node)->getAlignment()); 8595} 8596 8597/// LowerOperation - Provide custom lowering hooks for some operations. 8598/// 8599SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8600 switch (Op.getOpcode()) { 8601 default: llvm_unreachable("Should not custom lower this!"); 8602 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 8603 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 8604 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 8605 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8606 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 8607 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8608 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8609 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 8610 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8611 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8612 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8613 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8614 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 8615 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8616 case ISD::SHL_PARTS: 8617 case ISD::SRA_PARTS: 8618 case ISD::SRL_PARTS: return 
LowerShift(Op, DAG); 8619 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 8620 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 8621 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 8622 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 8623 case ISD::FABS: return LowerFABS(Op, DAG); 8624 case ISD::FNEG: return LowerFNEG(Op, DAG); 8625 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 8626 case ISD::SETCC: return LowerSETCC(Op, DAG); 8627 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 8628 case ISD::SELECT: return LowerSELECT(Op, DAG); 8629 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 8630 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8631 case ISD::VASTART: return LowerVASTART(Op, DAG); 8632 case ISD::VAARG: return LowerVAARG(Op, DAG); 8633 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 8634 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8635 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8636 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8637 case ISD::FRAME_TO_ARGS_OFFSET: 8638 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 8639 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 8640 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 8641 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 8642 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8643 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 8644 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 8645 case ISD::MUL: return LowerMUL_V2I64(Op, DAG); 8646 case ISD::SHL: return LowerSHL(Op, DAG); 8647 case ISD::SADDO: 8648 case ISD::UADDO: 8649 case ISD::SSUBO: 8650 case ISD::USUBO: 8651 case ISD::SMULO: 8652 case ISD::UMULO: return LowerXALUO(Op, DAG); 8653 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 8654 case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG); 8655 } 8656} 8657 8658void X86TargetLowering:: 8659ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 8660 SelectionDAG 
&DAG, unsigned NewOp) const { 8661 EVT T = Node->getValueType(0); 8662 DebugLoc dl = Node->getDebugLoc(); 8663 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 8664 8665 SDValue Chain = Node->getOperand(0); 8666 SDValue In1 = Node->getOperand(1); 8667 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 8668 Node->getOperand(2), DAG.getIntPtrConstant(0)); 8669 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 8670 Node->getOperand(2), DAG.getIntPtrConstant(1)); 8671 SDValue Ops[] = { Chain, In1, In2L, In2H }; 8672 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8673 SDValue Result = 8674 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 8675 cast<MemSDNode>(Node)->getMemOperand()); 8676 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 8677 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 8678 Results.push_back(Result.getValue(2)); 8679} 8680 8681/// ReplaceNodeResults - Replace a node with an illegal result type 8682/// with a new node built out of custom code. 8683void X86TargetLowering::ReplaceNodeResults(SDNode *N, 8684 SmallVectorImpl<SDValue>&Results, 8685 SelectionDAG &DAG) const { 8686 DebugLoc dl = N->getDebugLoc(); 8687 switch (N->getOpcode()) { 8688 default: 8689 assert(false && "Do not know how to custom type legalize this operation!"); 8690 return; 8691 case ISD::FP_TO_SINT: { 8692 std::pair<SDValue,SDValue> Vals = 8693 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 8694 SDValue FIST = Vals.first, StackSlot = Vals.second; 8695 if (FIST.getNode() != 0) { 8696 EVT VT = N->getValueType(0); 8697 // Return a load from the stack slot. 
8698 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 8699 MachinePointerInfo(), false, false, 0)); 8700 } 8701 return; 8702 } 8703 case ISD::READCYCLECOUNTER: { 8704 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8705 SDValue TheChain = N->getOperand(0); 8706 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 8707 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 8708 rd.getValue(1)); 8709 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 8710 eax.getValue(2)); 8711 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 8712 SDValue Ops[] = { eax, edx }; 8713 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 8714 Results.push_back(edx.getValue(1)); 8715 return; 8716 } 8717 case ISD::ATOMIC_CMP_SWAP: { 8718 EVT T = N->getValueType(0); 8719 assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap"); 8720 SDValue cpInL, cpInH; 8721 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 8722 DAG.getConstant(0, MVT::i32)); 8723 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 8724 DAG.getConstant(1, MVT::i32)); 8725 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue()); 8726 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH, 8727 cpInL.getValue(1)); 8728 SDValue swapInL, swapInH; 8729 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 8730 DAG.getConstant(0, MVT::i32)); 8731 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 8732 DAG.getConstant(1, MVT::i32)); 8733 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL, 8734 cpInH.getValue(1)); 8735 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH, 8736 swapInL.getValue(1)); 8737 SDValue Ops[] = { swapInH.getValue(0), 8738 N->getOperand(1), 8739 swapInH.getValue(1) }; 8740 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8741 MachineMemOperand *MMO 
= cast<AtomicSDNode>(N)->getMemOperand(); 8742 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, 8743 Ops, 3, T, MMO); 8744 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX, 8745 MVT::i32, Result.getValue(1)); 8746 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX, 8747 MVT::i32, cpOutL.getValue(2)); 8748 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 8749 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 8750 Results.push_back(cpOutH.getValue(1)); 8751 return; 8752 } 8753 case ISD::ATOMIC_LOAD_ADD: 8754 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG); 8755 return; 8756 case ISD::ATOMIC_LOAD_AND: 8757 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG); 8758 return; 8759 case ISD::ATOMIC_LOAD_NAND: 8760 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG); 8761 return; 8762 case ISD::ATOMIC_LOAD_OR: 8763 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG); 8764 return; 8765 case ISD::ATOMIC_LOAD_SUB: 8766 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG); 8767 return; 8768 case ISD::ATOMIC_LOAD_XOR: 8769 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG); 8770 return; 8771 case ISD::ATOMIC_SWAP: 8772 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG); 8773 return; 8774 } 8775} 8776 8777const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 8778 switch (Opcode) { 8779 default: return NULL; 8780 case X86ISD::BSF: return "X86ISD::BSF"; 8781 case X86ISD::BSR: return "X86ISD::BSR"; 8782 case X86ISD::SHLD: return "X86ISD::SHLD"; 8783 case X86ISD::SHRD: return "X86ISD::SHRD"; 8784 case X86ISD::FAND: return "X86ISD::FAND"; 8785 case X86ISD::FOR: return "X86ISD::FOR"; 8786 case X86ISD::FXOR: return "X86ISD::FXOR"; 8787 case X86ISD::FSRL: return "X86ISD::FSRL"; 8788 case X86ISD::FILD: return "X86ISD::FILD"; 8789 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 8790 
case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 8791 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 8792 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 8793 case X86ISD::FLD: return "X86ISD::FLD"; 8794 case X86ISD::FST: return "X86ISD::FST"; 8795 case X86ISD::CALL: return "X86ISD::CALL"; 8796 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 8797 case X86ISD::BT: return "X86ISD::BT"; 8798 case X86ISD::CMP: return "X86ISD::CMP"; 8799 case X86ISD::COMI: return "X86ISD::COMI"; 8800 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 8801 case X86ISD::SETCC: return "X86ISD::SETCC"; 8802 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 8803 case X86ISD::CMOV: return "X86ISD::CMOV"; 8804 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 8805 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 8806 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 8807 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 8808 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 8809 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 8810 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 8811 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 8812 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 8813 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 8814 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 8815 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 8816 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 8817 case X86ISD::FMAX: return "X86ISD::FMAX"; 8818 case X86ISD::FMIN: return "X86ISD::FMIN"; 8819 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 8820 case X86ISD::FRCP: return "X86ISD::FRCP"; 8821 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 8822 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 8823 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 8824 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 8825 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 8826 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 8827 case 
X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 8828 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 8829 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 8830 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 8831 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 8832 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 8833 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 8834 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 8835 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 8836 case X86ISD::VSHL: return "X86ISD::VSHL"; 8837 case X86ISD::VSRL: return "X86ISD::VSRL"; 8838 case X86ISD::CMPPD: return "X86ISD::CMPPD"; 8839 case X86ISD::CMPPS: return "X86ISD::CMPPS"; 8840 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB"; 8841 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW"; 8842 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD"; 8843 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ"; 8844 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB"; 8845 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW"; 8846 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD"; 8847 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ"; 8848 case X86ISD::ADD: return "X86ISD::ADD"; 8849 case X86ISD::SUB: return "X86ISD::SUB"; 8850 case X86ISD::SMUL: return "X86ISD::SMUL"; 8851 case X86ISD::UMUL: return "X86ISD::UMUL"; 8852 case X86ISD::INC: return "X86ISD::INC"; 8853 case X86ISD::DEC: return "X86ISD::DEC"; 8854 case X86ISD::OR: return "X86ISD::OR"; 8855 case X86ISD::XOR: return "X86ISD::XOR"; 8856 case X86ISD::AND: return "X86ISD::AND"; 8857 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 8858 case X86ISD::PTEST: return "X86ISD::PTEST"; 8859 case X86ISD::TESTP: return "X86ISD::TESTP"; 8860 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 8861 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 8862 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 8863 case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD"; 8864 case X86ISD::PSHUFLW: return 
"X86ISD::PSHUFLW"; 8865 case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD"; 8866 case X86ISD::SHUFPS: return "X86ISD::SHUFPS"; 8867 case X86ISD::SHUFPD: return "X86ISD::SHUFPD"; 8868 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 8869 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 8870 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 8871 case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD"; 8872 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 8873 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 8874 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 8875 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 8876 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 8877 case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD"; 8878 case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD"; 8879 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 8880 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 8881 case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS"; 8882 case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD"; 8883 case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS"; 8884 case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD"; 8885 case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW"; 8886 case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD"; 8887 case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ"; 8888 case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ"; 8889 case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW"; 8890 case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD"; 8891 case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ"; 8892 case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ"; 8893 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 8894 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 8895 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 8896 } 8897} 8898 8899// isLegalAddressingMode - Return true if the addressing mode represented 8900// by AM is legal for this target, for a load/store of the specified type. 
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
      Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}


/// isTruncateFree - A truncate between two integer types is free when it
/// merely drops high bits (NumBits1 > NumBits2); same-size or widening
/// "truncates" are not.
bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

/// isTruncateFree - EVT overload of the rule above, used by the DAG.
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return isPALIGNRMask(M, VT, Subtarget->hasSSSE3());

  // FIXME: pshufb, blends, shifts.
  // Accept any mask matching one of the shuffle patterns the X86 lowering
  // knows how to select directly.
  return (VT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, VT) ||
          isSHUFPMask(M, VT) ||
          isPSHUFDMask(M, VT) ||
          isPSHUFHWMask(M, VT) ||
          isPSHUFLWMask(M, VT) ||
          isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ||
          isUNPCKLMask(M, VT) ||
          isUNPCKHMask(M, VT) ||
          isUNPCKL_v_undef_Mask(M, VT) ||
          isUNPCKH_v_undef_Mask(M, VT));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  unsigned NumElts = VT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && VT.getSizeInBits() == 128) {
    return (isMOVLMask(Mask, VT)  ||
            isCommutedMOVLMask(Mask, VT, true) ||
            isSHUFPMask(Mask, VT) ||
            isCommutedSHUFPMask(Mask, VT));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

// private utility function
// Expands a pseudo atomic bitwise RMW instruction (and/or/xor/nand, any
// integer width) into a load / op / lock-cmpxchg retry loop.  The opcodes and
// the accumulator physreg for the chosen width are passed in by the caller.
MachineBasicBlock *
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpc,
                                                       unsigned immOpc,
                                                       unsigned LoadOpc,
                                                       unsigned CXchgOpc,
                                                       unsigned notOpc,
                                                       unsigned EAXreg,
                                                       TargetRegisterClass *RC,
                                                       bool invSrc) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld  t1 = [bitinstr.addr]
  //     op  t2 = t1, [bitinstr.val]
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz  newMBB
  //     fallthrough -->nextMBB
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and fall through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on incoming instruction
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  DebugLoc dl = bInstr->getDebugLoc();
  MachineOperand& destOper = bInstr->getOperand(0);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  int numArgs = bInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &bInstr->getOperand(i+1);

  // x86 address has 5 operands: base, index, scale, displacement, and segment
  // (X86::AddrNumOperands).
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  // t1 = load [addr]
  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // Optionally invert the loaded value first (used for NAND).
  unsigned tt = F->getRegInfo().createVirtualRegister(RC);
  if (invSrc) {
    MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1);
  }
  else
    tt = t1;

  // t2 = tt OP val  (register or immediate form as appropriate)
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
  MIB.addReg(tt);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  // The cmpxchg comparand goes in the accumulator (EAXreg for this width).
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t2);
  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(EAXreg);

  // insert branch: retry if another thread modified the location.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function: 64 bit atomics on 32 bit host.
// Expands a 64-bit pseudo atomic RMW on a 32-bit target into a CMPXCHG8B
// retry loop that operates on lo/hi i32 halves (regOpcL/H, immOpcL/H).
MachineBasicBlock *
X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpcL,
                                                       unsigned regOpcH,
                                                       unsigned immOpcL,
                                                       unsigned immOpcH,
                                                       bool invSrc) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB (instructions are in pairs, except cmpxchg8b)
  //     ld t1,t2 = [bitinstr.addr]
  //   newMBB:
  //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
  //     op  t5, t6 <- out1, out2, [bitinstr.val]
  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
  //     mov ECX, EBX <- t5, t6
  //     mov EAX, EDX <- t1, t2
  //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
  //     mov t3, t4 <- EAX, EDX
  //     bz  newMBB
  //     result in out1, out2
  //     fallthrough -->nextMBB

  const TargetRegisterClass *RC = X86::GR32RegisterClass;
  const unsigned LoadOpc = X86::MOV32rm;
  const unsigned NotOpc = X86::NOT32r;
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and fall through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = bInstr->getDebugLoc();
  // Insert instructions into newMBB based on incoming instruction
  // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
         "unexpected number of operands");
  MachineOperand& dest1Oper = bInstr->getOperand(0);
  MachineOperand& dest2Oper = bInstr->getOperand(1);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
    argOpers[i] = &bInstr->getOperand(i+2);

    // We use some of the operands multiple times, so conservatively just
    // clear any kill flags that might be present.
    if (argOpers[i]->isReg() && argOpers[i]->isUse())
      argOpers[i]->setIsKill(false);
  }

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]

  // Initial loads of both halves happen in thisMBB, before the loop.
  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
  // add 4 to displacement.
  for (int i=0; i <= lastAddrIndx-2; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
    newOp3.setImm(newOp3.getImm()+4);
  else
    newOp3.setOffset(newOp3.getOffset()+4);
  (*MIB).addOperand(newOp3);
  (*MIB).addOperand(*argOpers[lastAddrIndx]);

  // t3/4 are defined later, at the bottom of the loop
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
    .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
    .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);

  // The subsequent operations should be using the destination registers of
  // the PHI instructions.
  if (invSrc) {
    t1 = F->getRegInfo().createVirtualRegister(RC);
    t2 = F->getRegInfo().createVirtualRegister(RC);
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg());
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg());
  } else {
    t1 = dest1Oper.getReg();
    t2 = dest2Oper.getReg();
  }

  int valArgIndx = lastAddrIndx + 1;
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  // t5/t6 are the new lo/hi values to store.  For SWAP (MOV32rr) the old
  // value is not an input, so skip adding it as the first source.
  unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
  if (regOpcL != X86::MOV32rr)
    MIB.addReg(t1);
  (*MIB).addOperand(*argOpers[valArgIndx]);
  assert(argOpers[valArgIndx + 1]->isReg() ==
         argOpers[valArgIndx]->isReg());
  assert(argOpers[valArgIndx + 1]->isImm() ==
         argOpers[valArgIndx]->isImm());
  if (argOpers[valArgIndx + 1]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
  if (regOpcH != X86::MOV32rr)
    MIB.addReg(t2);
  (*MIB).addOperand(*argOpers[valArgIndx + 1]);

  // CMPXCHG8B comparand in EDX:EAX, replacement in ECX:EBX.
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
  MIB.addReg(t2);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
  MIB.addReg(t5);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
  MIB.addReg(t6);

  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  // Capture the observed old value (EDX:EAX) for the next loop iteration.
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
  MIB.addReg(X86::EAX);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
  MIB.addReg(X86::EDX);

  // insert branch: retry if cmpxchg8b failed.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function
// Expands a pseudo 32-bit atomic min/max into a load / compare / cmov /
// lock-cmpxchg retry loop; cmovOpc selects the min-vs-max condition.
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
                                                      MachineBasicBlock *MBB,
                                                      unsigned cmovOpc) const {
  // For the atomic min/max operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld t1 = [min/max.addr]
  //     mov t2 = [min/max.val]
  //     cmp  t1, t2
  //     cmov[cond] t2 = t1
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz   newMBB
  //     fallthrough -->nextMBB
  //
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(mInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to newMBB and fall through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = mInstr->getDebugLoc();
  // Insert instructions into newMBB based on incoming instruction
  assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  MachineOperand& destOper = mInstr->getOperand(0);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  int numArgs = mInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &mInstr->getOperand(i+1);

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  // t1 = load [addr]
  unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // We only support register and immediate values
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");

  // t2 = the incoming comparison value.
  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  // cmpxchg comparand in EAX.
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
  MIB.addReg(t1);
  MIB.addReg(t2);

  // Generate movc: t3 = cmov(t2, t1) picks the min/max of the two.
  unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3);
  MIB.addReg(t2);
  MIB.addReg(t1);

  // Cmp and exchange if none has modified the memory location
  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(mInstr->memoperands_begin(),
                    mInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(X86::EAX);

  // insert branch: retry if the location changed under us.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  mInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
// Expands a PCMPISTRM/PCMPESTRM intrinsic pseudo; the hardware result is
// implicitly in XMM0 and is copied to the pseudo's destination register.
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
                            unsigned numArgs, bool memArg) const {

  assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
         "Target must have SSE4.2 or AVX features enabled");

  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  unsigned Opc;

  // numArgs == 3 selects the implicit-length (ISTRM) form, otherwise the
  // explicit-length (ESTRM) form; memArg selects the memory-operand variant.
  if (!Subtarget->hasAVX()) {
    if (memArg)
      Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
  } else {
    if (memArg)
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
  }

  MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(Opc));

  for (unsigned i = 0; i < numArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i+1);

    // Skip implicit register operands; only the explicit args are copied.
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }

  BuildMI(BB, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();

  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(
                   MachineInstr *MI,
                   MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &MRI =
MBB->getParent()->getRegInfo(); 9490 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 9491 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 9492 DebugLoc DL = MI->getDebugLoc(); 9493 9494 // struct va_list { 9495 // i32 gp_offset 9496 // i32 fp_offset 9497 // i64 overflow_area (address) 9498 // i64 reg_save_area (address) 9499 // } 9500 // sizeof(va_list) = 24 9501 // alignment(va_list) = 8 9502 9503 unsigned TotalNumIntRegs = 6; 9504 unsigned TotalNumXMMRegs = 8; 9505 bool UseGPOffset = (ArgMode == 1); 9506 bool UseFPOffset = (ArgMode == 2); 9507 unsigned MaxOffset = TotalNumIntRegs * 8 + 9508 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 9509 9510 /* Align ArgSize to a multiple of 8 */ 9511 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 9512 bool NeedsAlign = (Align > 8); 9513 9514 MachineBasicBlock *thisMBB = MBB; 9515 MachineBasicBlock *overflowMBB; 9516 MachineBasicBlock *offsetMBB; 9517 MachineBasicBlock *endMBB; 9518 9519 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 9520 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 9521 unsigned OffsetReg = 0; 9522 9523 if (!UseGPOffset && !UseFPOffset) { 9524 // If we only pull from the overflow region, we don't create a branch. 9525 // We don't need to alter control flow. 9526 OffsetDestReg = 0; // unused 9527 OverflowDestReg = DestReg; 9528 9529 offsetMBB = NULL; 9530 overflowMBB = thisMBB; 9531 endMBB = thisMBB; 9532 } else { 9533 // First emit code to check if gp_offset (or fp_offset) is below the bound. 9534 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 9535 // If not, pull from overflow_area. (branch to overflowMBB) 9536 // 9537 // thisMBB 9538 // | . 9539 // | . 9540 // offsetMBB overflowMBB 9541 // | . 9542 // | . 
9543 // endMBB 9544 9545 // Registers for the PHI in endMBB 9546 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 9547 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 9548 9549 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 9550 MachineFunction *MF = MBB->getParent(); 9551 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 9552 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 9553 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 9554 9555 MachineFunction::iterator MBBIter = MBB; 9556 ++MBBIter; 9557 9558 // Insert the new basic blocks 9559 MF->insert(MBBIter, offsetMBB); 9560 MF->insert(MBBIter, overflowMBB); 9561 MF->insert(MBBIter, endMBB); 9562 9563 // Transfer the remainder of MBB and its successor edges to endMBB. 9564 endMBB->splice(endMBB->begin(), thisMBB, 9565 llvm::next(MachineBasicBlock::iterator(MI)), 9566 thisMBB->end()); 9567 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 9568 9569 // Make offsetMBB and overflowMBB successors of thisMBB 9570 thisMBB->addSuccessor(offsetMBB); 9571 thisMBB->addSuccessor(overflowMBB); 9572 9573 // endMBB is a successor of both offsetMBB and overflowMBB 9574 offsetMBB->addSuccessor(endMBB); 9575 overflowMBB->addSuccessor(endMBB); 9576 9577 // Load the offset value into a register 9578 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 9579 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 9580 .addOperand(Base) 9581 .addOperand(Scale) 9582 .addOperand(Index) 9583 .addDisp(Disp, UseFPOffset ? 4 : 0) 9584 .addOperand(Segment) 9585 .setMemRefs(MMOBegin, MMOEnd); 9586 9587 // Check if there is enough room left to pull this argument. 
9588 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 9589 .addReg(OffsetReg) 9590 .addImm(MaxOffset + 8 - ArgSizeA8); 9591 9592 // Branch to "overflowMBB" if offset >= max 9593 // Fall through to "offsetMBB" otherwise 9594 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 9595 .addMBB(overflowMBB); 9596 } 9597 9598 // In offsetMBB, emit code to use the reg_save_area. 9599 if (offsetMBB) { 9600 assert(OffsetReg != 0); 9601 9602 // Read the reg_save_area address. 9603 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 9604 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 9605 .addOperand(Base) 9606 .addOperand(Scale) 9607 .addOperand(Index) 9608 .addDisp(Disp, 16) 9609 .addOperand(Segment) 9610 .setMemRefs(MMOBegin, MMOEnd); 9611 9612 // Zero-extend the offset 9613 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 9614 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 9615 .addImm(0) 9616 .addReg(OffsetReg) 9617 .addImm(X86::sub_32bit); 9618 9619 // Add the offset to the reg_save_area to get the final address. 9620 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 9621 .addReg(OffsetReg64) 9622 .addReg(RegSaveReg); 9623 9624 // Compute the offset for the next argument 9625 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 9626 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 9627 .addReg(OffsetReg) 9628 .addImm(UseFPOffset ? 16 : 8); 9629 9630 // Store it back into the va_list. 9631 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 9632 .addOperand(Base) 9633 .addOperand(Scale) 9634 .addOperand(Index) 9635 .addDisp(Disp, UseFPOffset ? 4 : 0) 9636 .addOperand(Segment) 9637 .addReg(NextOffsetReg) 9638 .setMemRefs(MMOBegin, MMOEnd); 9639 9640 // Jump to endMBB 9641 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 9642 .addMBB(endMBB); 9643 } 9644 9645 // 9646 // Emit code to use overflow area 9647 // 9648 9649 // Load the overflow_area address into a register. 
9650 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 9651 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 9652 .addOperand(Base) 9653 .addOperand(Scale) 9654 .addOperand(Index) 9655 .addDisp(Disp, 8) 9656 .addOperand(Segment) 9657 .setMemRefs(MMOBegin, MMOEnd); 9658 9659 // If we need to align it, do so. Otherwise, just copy the address 9660 // to OverflowDestReg. 9661 if (NeedsAlign) { 9662 // Align the overflow address 9663 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 9664 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 9665 9666 // aligned_addr = (addr + (align-1)) & ~(align-1) 9667 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 9668 .addReg(OverflowAddrReg) 9669 .addImm(Align-1); 9670 9671 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 9672 .addReg(TmpReg) 9673 .addImm(~(uint64_t)(Align-1)); 9674 } else { 9675 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 9676 .addReg(OverflowAddrReg); 9677 } 9678 9679 // Compute the next overflow address after this argument. 9680 // (the overflow address should be kept 8-byte aligned) 9681 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 9682 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 9683 .addReg(OverflowDestReg) 9684 .addImm(ArgSizeA8); 9685 9686 // Store the new overflow address. 9687 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 9688 .addOperand(Base) 9689 .addOperand(Scale) 9690 .addOperand(Index) 9691 .addDisp(Disp, 8) 9692 .addOperand(Segment) 9693 .addReg(NextAddrReg) 9694 .setMemRefs(MMOBegin, MMOEnd); 9695 9696 // If we branched, emit the PHI to the front of endMBB. 
9697 if (offsetMBB) { 9698 BuildMI(*endMBB, endMBB->begin(), DL, 9699 TII->get(X86::PHI), DestReg) 9700 .addReg(OffsetDestReg).addMBB(offsetMBB) 9701 .addReg(OverflowDestReg).addMBB(overflowMBB); 9702 } 9703 9704 // Erase the pseudo instruction 9705 MI->eraseFromParent(); 9706 9707 return endMBB; 9708} 9709 9710MachineBasicBlock * 9711X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 9712 MachineInstr *MI, 9713 MachineBasicBlock *MBB) const { 9714 // Emit code to save XMM registers to the stack. The ABI says that the 9715 // number of registers to save is given in %al, so it's theoretically 9716 // possible to do an indirect jump trick to avoid saving all of them, 9717 // however this code takes a simpler approach and just executes all 9718 // of the stores if %al is non-zero. It's less code, and it's probably 9719 // easier on the hardware branch predictor, and stores aren't all that 9720 // expensive anyway. 9721 9722 // Create the new basic blocks. One block contains all the XMM stores, 9723 // and one block is the final destination regardless of whether any 9724 // stores were performed. 9725 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 9726 MachineFunction *F = MBB->getParent(); 9727 MachineFunction::iterator MBBIter = MBB; 9728 ++MBBIter; 9729 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 9730 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 9731 F->insert(MBBIter, XMMSaveMBB); 9732 F->insert(MBBIter, EndMBB); 9733 9734 // Transfer the remainder of MBB and its successor edges to EndMBB. 9735 EndMBB->splice(EndMBB->begin(), MBB, 9736 llvm::next(MachineBasicBlock::iterator(MI)), 9737 MBB->end()); 9738 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 9739 9740 // The original block will now fall through to the XMM save block. 9741 MBB->addSuccessor(XMMSaveMBB); 9742 // The XMMSaveMBB will fall through to the end block. 9743 XMMSaveMBB->addSuccessor(EndMBB); 9744 9745 // Now add the instructions. 
9746 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9747 DebugLoc DL = MI->getDebugLoc(); 9748 9749 unsigned CountReg = MI->getOperand(0).getReg(); 9750 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 9751 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 9752 9753 if (!Subtarget->isTargetWin64()) { 9754 // If %al is 0, branch around the XMM save block. 9755 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 9756 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 9757 MBB->addSuccessor(EndMBB); 9758 } 9759 9760 // In the XMM save block, save all the XMM argument registers. 9761 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 9762 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 9763 MachineMemOperand *MMO = 9764 F->getMachineMemOperand( 9765 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 9766 MachineMemOperand::MOStore, 9767 /*Size=*/16, /*Align=*/16); 9768 BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr)) 9769 .addFrameIndex(RegSaveFrameIndex) 9770 .addImm(/*Scale=*/1) 9771 .addReg(/*IndexReg=*/0) 9772 .addImm(/*Disp=*/Offset) 9773 .addReg(/*Segment=*/0) 9774 .addReg(MI->getOperand(i).getReg()) 9775 .addMemOperand(MMO); 9776 } 9777 9778 MI->eraseFromParent(); // The pseudo instruction is gone now. 9779 9780 return EndMBB; 9781} 9782 9783MachineBasicBlock * 9784X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 9785 MachineBasicBlock *BB) const { 9786 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9787 DebugLoc DL = MI->getDebugLoc(); 9788 9789 // To "insert" a SELECT_CC instruction, we actually have to insert the 9790 // diamond control-flow pattern. The incoming instruction knows the 9791 // destination vreg to set, the condition code register to branch on, the 9792 // true/false values to select between, and a branch opcode to use. 
9793 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9794 MachineFunction::iterator It = BB; 9795 ++It; 9796 9797 // thisMBB: 9798 // ... 9799 // TrueVal = ... 9800 // cmpTY ccX, r1, r2 9801 // bCC copy1MBB 9802 // fallthrough --> copy0MBB 9803 MachineBasicBlock *thisMBB = BB; 9804 MachineFunction *F = BB->getParent(); 9805 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 9806 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9807 F->insert(It, copy0MBB); 9808 F->insert(It, sinkMBB); 9809 9810 // If the EFLAGS register isn't dead in the terminator, then claim that it's 9811 // live into the sink and copy blocks. 9812 const MachineFunction *MF = BB->getParent(); 9813 const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); 9814 BitVector ReservedRegs = TRI->getReservedRegs(*MF); 9815 9816 for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { 9817 const MachineOperand &MO = MI->getOperand(I); 9818 if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue; 9819 unsigned Reg = MO.getReg(); 9820 if (Reg != X86::EFLAGS) continue; 9821 copy0MBB->addLiveIn(Reg); 9822 sinkMBB->addLiveIn(Reg); 9823 } 9824 9825 // Transfer the remainder of BB and its successor edges to sinkMBB. 9826 sinkMBB->splice(sinkMBB->begin(), BB, 9827 llvm::next(MachineBasicBlock::iterator(MI)), 9828 BB->end()); 9829 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9830 9831 // Add the true and fallthrough blocks as its successors. 9832 BB->addSuccessor(copy0MBB); 9833 BB->addSuccessor(sinkMBB); 9834 9835 // Create the conditional branch instruction. 9836 unsigned Opc = 9837 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 9838 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 9839 9840 // copy0MBB: 9841 // %FalseValue = ... 9842 // # fallthrough to sinkMBB 9843 copy0MBB->addSuccessor(sinkMBB); 9844 9845 // sinkMBB: 9846 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 9847 // ... 
9848 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 9849 TII->get(X86::PHI), MI->getOperand(0).getReg()) 9850 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 9851 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 9852 9853 MI->eraseFromParent(); // The pseudo instruction is gone now. 9854 return sinkMBB; 9855} 9856 9857MachineBasicBlock * 9858X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 9859 MachineBasicBlock *BB) const { 9860 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9861 DebugLoc DL = MI->getDebugLoc(); 9862 9863 // The lowering is pretty easy: we're just emitting the call to _alloca. The 9864 // non-trivial part is impdef of ESP. 9865 // FIXME: The code should be tweaked as soon as we'll try to do codegen for 9866 // mingw-w64. 9867 9868 const char *StackProbeSymbol = 9869 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 9870 9871 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 9872 .addExternalSymbol(StackProbeSymbol) 9873 .addReg(X86::EAX, RegState::Implicit) 9874 .addReg(X86::ESP, RegState::Implicit) 9875 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 9876 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 9877 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 9878 9879 MI->eraseFromParent(); // The pseudo instruction is gone now. 9880 return BB; 9881} 9882 9883MachineBasicBlock * 9884X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 9885 MachineBasicBlock *BB) const { 9886 // This is pretty easy. We're taking the value that we received from 9887 // our load from the relocation, sticking it in either RDI (x86-64) 9888 // or EAX and doing an indirect call. The return value will then 9889 // be in the normal return register. 
9890 const X86InstrInfo *TII 9891 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 9892 DebugLoc DL = MI->getDebugLoc(); 9893 MachineFunction *F = BB->getParent(); 9894 9895 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 9896 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 9897 9898 if (Subtarget->is64Bit()) { 9899 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 9900 TII->get(X86::MOV64rm), X86::RDI) 9901 .addReg(X86::RIP) 9902 .addImm(0).addReg(0) 9903 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 9904 MI->getOperand(3).getTargetFlags()) 9905 .addReg(0); 9906 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 9907 addDirectMem(MIB, X86::RDI); 9908 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 9909 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 9910 TII->get(X86::MOV32rm), X86::EAX) 9911 .addReg(0) 9912 .addImm(0).addReg(0) 9913 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 9914 MI->getOperand(3).getTargetFlags()) 9915 .addReg(0); 9916 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 9917 addDirectMem(MIB, X86::EAX); 9918 } else { 9919 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 9920 TII->get(X86::MOV32rm), X86::EAX) 9921 .addReg(TII->getGlobalBaseReg(F)) 9922 .addImm(0).addReg(0) 9923 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 9924 MI->getOperand(3).getTargetFlags()) 9925 .addReg(0); 9926 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 9927 addDirectMem(MIB, X86::EAX); 9928 } 9929 9930 MI->eraseFromParent(); // The pseudo instruction is gone now. 
9931 return BB; 9932} 9933 9934MachineBasicBlock * 9935X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 9936 MachineBasicBlock *BB) const { 9937 switch (MI->getOpcode()) { 9938 default: assert(false && "Unexpected instr type to insert"); 9939 case X86::WIN_ALLOCA: 9940 return EmitLoweredWinAlloca(MI, BB); 9941 case X86::TLSCall_32: 9942 case X86::TLSCall_64: 9943 return EmitLoweredTLSCall(MI, BB); 9944 case X86::CMOV_GR8: 9945 case X86::CMOV_FR32: 9946 case X86::CMOV_FR64: 9947 case X86::CMOV_V4F32: 9948 case X86::CMOV_V2F64: 9949 case X86::CMOV_V2I64: 9950 case X86::CMOV_GR16: 9951 case X86::CMOV_GR32: 9952 case X86::CMOV_RFP32: 9953 case X86::CMOV_RFP64: 9954 case X86::CMOV_RFP80: 9955 return EmitLoweredSelect(MI, BB); 9956 9957 case X86::FP32_TO_INT16_IN_MEM: 9958 case X86::FP32_TO_INT32_IN_MEM: 9959 case X86::FP32_TO_INT64_IN_MEM: 9960 case X86::FP64_TO_INT16_IN_MEM: 9961 case X86::FP64_TO_INT32_IN_MEM: 9962 case X86::FP64_TO_INT64_IN_MEM: 9963 case X86::FP80_TO_INT16_IN_MEM: 9964 case X86::FP80_TO_INT32_IN_MEM: 9965 case X86::FP80_TO_INT64_IN_MEM: { 9966 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9967 DebugLoc DL = MI->getDebugLoc(); 9968 9969 // Change the floating point control register to use "round towards zero" 9970 // mode when truncating to an integer value. 9971 MachineFunction *F = BB->getParent(); 9972 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 9973 addFrameReference(BuildMI(*BB, MI, DL, 9974 TII->get(X86::FNSTCW16m)), CWFrameIdx); 9975 9976 // Load the old value of the high byte of the control word... 9977 unsigned OldCW = 9978 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 9979 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 9980 CWFrameIdx); 9981 9982 // Set the high part to be round to zero... 9983 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 9984 .addImm(0xC7F); 9985 9986 // Reload the modified control word now... 
9987 addFrameReference(BuildMI(*BB, MI, DL, 9988 TII->get(X86::FLDCW16m)), CWFrameIdx); 9989 9990 // Restore the memory image of control word to original value 9991 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 9992 .addReg(OldCW); 9993 9994 // Get the X86 opcode to use. 9995 unsigned Opc; 9996 switch (MI->getOpcode()) { 9997 default: llvm_unreachable("illegal opcode!"); 9998 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 9999 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 10000 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 10001 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 10002 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 10003 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 10004 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 10005 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 10006 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 10007 } 10008 10009 X86AddressMode AM; 10010 MachineOperand &Op = MI->getOperand(0); 10011 if (Op.isReg()) { 10012 AM.BaseType = X86AddressMode::RegBase; 10013 AM.Base.Reg = Op.getReg(); 10014 } else { 10015 AM.BaseType = X86AddressMode::FrameIndexBase; 10016 AM.Base.FrameIndex = Op.getIndex(); 10017 } 10018 Op = MI->getOperand(1); 10019 if (Op.isImm()) 10020 AM.Scale = Op.getImm(); 10021 Op = MI->getOperand(2); 10022 if (Op.isImm()) 10023 AM.IndexReg = Op.getImm(); 10024 Op = MI->getOperand(3); 10025 if (Op.isGlobal()) { 10026 AM.GV = Op.getGlobal(); 10027 } else { 10028 AM.Disp = Op.getImm(); 10029 } 10030 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 10031 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 10032 10033 // Reload the original control word now. 10034 addFrameReference(BuildMI(*BB, MI, DL, 10035 TII->get(X86::FLDCW16m)), CWFrameIdx); 10036 10037 MI->eraseFromParent(); // The pseudo instruction is gone now. 
10038 return BB; 10039 } 10040 // String/text processing lowering. 10041 case X86::PCMPISTRM128REG: 10042 case X86::VPCMPISTRM128REG: 10043 return EmitPCMP(MI, BB, 3, false /* in-mem */); 10044 case X86::PCMPISTRM128MEM: 10045 case X86::VPCMPISTRM128MEM: 10046 return EmitPCMP(MI, BB, 3, true /* in-mem */); 10047 case X86::PCMPESTRM128REG: 10048 case X86::VPCMPESTRM128REG: 10049 return EmitPCMP(MI, BB, 5, false /* in mem */); 10050 case X86::PCMPESTRM128MEM: 10051 case X86::VPCMPESTRM128MEM: 10052 return EmitPCMP(MI, BB, 5, true /* in mem */); 10053 10054 // Atomic Lowering. 10055 case X86::ATOMAND32: 10056 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 10057 X86::AND32ri, X86::MOV32rm, 10058 X86::LCMPXCHG32, 10059 X86::NOT32r, X86::EAX, 10060 X86::GR32RegisterClass); 10061 case X86::ATOMOR32: 10062 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 10063 X86::OR32ri, X86::MOV32rm, 10064 X86::LCMPXCHG32, 10065 X86::NOT32r, X86::EAX, 10066 X86::GR32RegisterClass); 10067 case X86::ATOMXOR32: 10068 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 10069 X86::XOR32ri, X86::MOV32rm, 10070 X86::LCMPXCHG32, 10071 X86::NOT32r, X86::EAX, 10072 X86::GR32RegisterClass); 10073 case X86::ATOMNAND32: 10074 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 10075 X86::AND32ri, X86::MOV32rm, 10076 X86::LCMPXCHG32, 10077 X86::NOT32r, X86::EAX, 10078 X86::GR32RegisterClass, true); 10079 case X86::ATOMMIN32: 10080 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 10081 case X86::ATOMMAX32: 10082 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 10083 case X86::ATOMUMIN32: 10084 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 10085 case X86::ATOMUMAX32: 10086 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 10087 10088 case X86::ATOMAND16: 10089 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 10090 X86::AND16ri, X86::MOV16rm, 10091 
X86::LCMPXCHG16, 10092 X86::NOT16r, X86::AX, 10093 X86::GR16RegisterClass); 10094 case X86::ATOMOR16: 10095 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 10096 X86::OR16ri, X86::MOV16rm, 10097 X86::LCMPXCHG16, 10098 X86::NOT16r, X86::AX, 10099 X86::GR16RegisterClass); 10100 case X86::ATOMXOR16: 10101 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, 10102 X86::XOR16ri, X86::MOV16rm, 10103 X86::LCMPXCHG16, 10104 X86::NOT16r, X86::AX, 10105 X86::GR16RegisterClass); 10106 case X86::ATOMNAND16: 10107 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 10108 X86::AND16ri, X86::MOV16rm, 10109 X86::LCMPXCHG16, 10110 X86::NOT16r, X86::AX, 10111 X86::GR16RegisterClass, true); 10112 case X86::ATOMMIN16: 10113 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); 10114 case X86::ATOMMAX16: 10115 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); 10116 case X86::ATOMUMIN16: 10117 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); 10118 case X86::ATOMUMAX16: 10119 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); 10120 10121 case X86::ATOMAND8: 10122 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 10123 X86::AND8ri, X86::MOV8rm, 10124 X86::LCMPXCHG8, 10125 X86::NOT8r, X86::AL, 10126 X86::GR8RegisterClass); 10127 case X86::ATOMOR8: 10128 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 10129 X86::OR8ri, X86::MOV8rm, 10130 X86::LCMPXCHG8, 10131 X86::NOT8r, X86::AL, 10132 X86::GR8RegisterClass); 10133 case X86::ATOMXOR8: 10134 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, 10135 X86::XOR8ri, X86::MOV8rm, 10136 X86::LCMPXCHG8, 10137 X86::NOT8r, X86::AL, 10138 X86::GR8RegisterClass); 10139 case X86::ATOMNAND8: 10140 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 10141 X86::AND8ri, X86::MOV8rm, 10142 X86::LCMPXCHG8, 10143 X86::NOT8r, X86::AL, 10144 X86::GR8RegisterClass, true); 10145 // FIXME: There are no CMOV8 
instructions; MIN/MAX need some other way. 10146 // This group is for 64-bit host. 10147 case X86::ATOMAND64: 10148 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 10149 X86::AND64ri32, X86::MOV64rm, 10150 X86::LCMPXCHG64, 10151 X86::NOT64r, X86::RAX, 10152 X86::GR64RegisterClass); 10153 case X86::ATOMOR64: 10154 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 10155 X86::OR64ri32, X86::MOV64rm, 10156 X86::LCMPXCHG64, 10157 X86::NOT64r, X86::RAX, 10158 X86::GR64RegisterClass); 10159 case X86::ATOMXOR64: 10160 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, 10161 X86::XOR64ri32, X86::MOV64rm, 10162 X86::LCMPXCHG64, 10163 X86::NOT64r, X86::RAX, 10164 X86::GR64RegisterClass); 10165 case X86::ATOMNAND64: 10166 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 10167 X86::AND64ri32, X86::MOV64rm, 10168 X86::LCMPXCHG64, 10169 X86::NOT64r, X86::RAX, 10170 X86::GR64RegisterClass, true); 10171 case X86::ATOMMIN64: 10172 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); 10173 case X86::ATOMMAX64: 10174 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); 10175 case X86::ATOMUMIN64: 10176 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); 10177 case X86::ATOMUMAX64: 10178 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); 10179 10180 // This group does 64-bit operations on a 32-bit host. 
10181 case X86::ATOMAND6432: 10182 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10183 X86::AND32rr, X86::AND32rr, 10184 X86::AND32ri, X86::AND32ri, 10185 false); 10186 case X86::ATOMOR6432: 10187 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10188 X86::OR32rr, X86::OR32rr, 10189 X86::OR32ri, X86::OR32ri, 10190 false); 10191 case X86::ATOMXOR6432: 10192 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10193 X86::XOR32rr, X86::XOR32rr, 10194 X86::XOR32ri, X86::XOR32ri, 10195 false); 10196 case X86::ATOMNAND6432: 10197 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10198 X86::AND32rr, X86::AND32rr, 10199 X86::AND32ri, X86::AND32ri, 10200 true); 10201 case X86::ATOMADD6432: 10202 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10203 X86::ADD32rr, X86::ADC32rr, 10204 X86::ADD32ri, X86::ADC32ri, 10205 false); 10206 case X86::ATOMSUB6432: 10207 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10208 X86::SUB32rr, X86::SBB32rr, 10209 X86::SUB32ri, X86::SBB32ri, 10210 false); 10211 case X86::ATOMSWAP6432: 10212 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10213 X86::MOV32rr, X86::MOV32rr, 10214 X86::MOV32ri, X86::MOV32ri, 10215 false); 10216 case X86::VASTART_SAVE_XMM_REGS: 10217 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 10218 10219 case X86::VAARG_64: 10220 return EmitVAARG64WithCustomInserter(MI, BB); 10221 } 10222} 10223 10224//===----------------------------------------------------------------------===// 10225// X86 Optimization Hooks 10226//===----------------------------------------------------------------------===// 10227 10228void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 10229 const APInt &Mask, 10230 APInt &KnownZero, 10231 APInt &KnownOne, 10232 const SelectionDAG &DAG, 10233 unsigned Depth) const { 10234 unsigned Opc = Op.getOpcode(); 10235 assert((Opc >= ISD::BUILTIN_OP_END || 10236 Opc == ISD::INTRINSIC_WO_CHAIN || 10237 Opc == ISD::INTRINSIC_W_CHAIN || 10238 Opc == ISD::INTRINSIC_VOID) && 
10239 "Should use MaskedValueIsZero if you don't know whether Op" 10240 " is a target node!"); 10241 10242 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 10243 switch (Opc) { 10244 default: break; 10245 case X86ISD::ADD: 10246 case X86ISD::SUB: 10247 case X86ISD::SMUL: 10248 case X86ISD::UMUL: 10249 case X86ISD::INC: 10250 case X86ISD::DEC: 10251 case X86ISD::OR: 10252 case X86ISD::XOR: 10253 case X86ISD::AND: 10254 // These nodes' second result is a boolean. 10255 if (Op.getResNo() == 0) 10256 break; 10257 // Fallthrough 10258 case X86ISD::SETCC: 10259 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 10260 Mask.getBitWidth() - 1); 10261 break; 10262 } 10263} 10264 10265unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 10266 unsigned Depth) const { 10267 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 10268 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 10269 return Op.getValueType().getScalarType().getSizeInBits(); 10270 10271 // Fallback case. 10272 return 1; 10273} 10274 10275/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 10276/// node is a GlobalAddress + offset. 10277bool X86TargetLowering::isGAPlusOffset(SDNode *N, 10278 const GlobalValue* &GA, 10279 int64_t &Offset) const { 10280 if (N->getOpcode() == X86ISD::Wrapper) { 10281 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 10282 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 10283 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 10284 return true; 10285 } 10286 } 10287 return TargetLowering::isGAPlusOffset(N, GA, Offset); 10288} 10289 10290/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 10291/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 10292/// if the load addresses are consecutive, non-overlapping, and in the right 10293/// order. 
10294static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 10295 const TargetLowering &TLI) { 10296 DebugLoc dl = N->getDebugLoc(); 10297 EVT VT = N->getValueType(0); 10298 10299 if (VT.getSizeInBits() != 128) 10300 return SDValue(); 10301 10302 SmallVector<SDValue, 16> Elts; 10303 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 10304 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 10305 10306 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 10307} 10308 10309/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 10310/// generation and convert it from being a bunch of shuffles and extracts 10311/// to a simple store and scalar loads to extract the elements. 10312static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 10313 const TargetLowering &TLI) { 10314 SDValue InputVector = N->getOperand(0); 10315 10316 // Only operate on vectors of 4 elements, where the alternative shuffling 10317 // gets to be more expensive. 10318 if (InputVector.getValueType() != MVT::v4i32) 10319 return SDValue(); 10320 10321 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 10322 // single use which is a sign-extend or zero-extend, and all elements are 10323 // used. 
10324 SmallVector<SDNode *, 4> Uses; 10325 unsigned ExtractedElements = 0; 10326 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 10327 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 10328 if (UI.getUse().getResNo() != InputVector.getResNo()) 10329 return SDValue(); 10330 10331 SDNode *Extract = *UI; 10332 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 10333 return SDValue(); 10334 10335 if (Extract->getValueType(0) != MVT::i32) 10336 return SDValue(); 10337 if (!Extract->hasOneUse()) 10338 return SDValue(); 10339 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 10340 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 10341 return SDValue(); 10342 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 10343 return SDValue(); 10344 10345 // Record which element was extracted. 10346 ExtractedElements |= 10347 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 10348 10349 Uses.push_back(Extract); 10350 } 10351 10352 // If not all the elements were used, this may not be worthwhile. 10353 if (ExtractedElements != 15) 10354 return SDValue(); 10355 10356 // Ok, we've now decided to do the transformation. 10357 DebugLoc dl = InputVector.getDebugLoc(); 10358 10359 // Store the value to a temporary stack slot. 10360 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 10361 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 10362 MachinePointerInfo(), false, false, 0); 10363 10364 // Replace each use (extract) with a load of the appropriate element. 10365 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 10366 UE = Uses.end(); UI != UE; ++UI) { 10367 SDNode *Extract = *UI; 10368 10369 // Compute the element's address. 
10370 SDValue Idx = Extract->getOperand(1); 10371 unsigned EltSize = 10372 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 10373 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 10374 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 10375 10376 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), 10377 StackPtr, OffsetVal); 10378 10379 // Load the scalar. 10380 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 10381 ScalarAddr, MachinePointerInfo(), 10382 false, false, 0); 10383 10384 // Replace the exact with the load. 10385 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 10386 } 10387 10388 // The replacement was made in place; don't return anything. 10389 return SDValue(); 10390} 10391 10392/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 10393static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 10394 const X86Subtarget *Subtarget) { 10395 DebugLoc DL = N->getDebugLoc(); 10396 SDValue Cond = N->getOperand(0); 10397 // Get the LHS/RHS of the select. 10398 SDValue LHS = N->getOperand(1); 10399 SDValue RHS = N->getOperand(2); 10400 10401 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 10402 // instructions match the semantics of the common C idiom x<y?x:y but not 10403 // x<=y?x:y, because of how they handle negative zero (which can be 10404 // ignored in unsafe-math mode). 10405 if (Subtarget->hasSSE2() && 10406 (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) && 10407 Cond.getOpcode() == ISD::SETCC) { 10408 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 10409 10410 unsigned Opcode = 0; 10411 // Check for x CC y ? x : y. 
10412 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 10413 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 10414 switch (CC) { 10415 default: break; 10416 case ISD::SETULT: 10417 // Converting this to a min would handle NaNs incorrectly, and swapping 10418 // the operands would cause it to handle comparisons between positive 10419 // and negative zero incorrectly. 10420 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 10421 if (!UnsafeFPMath && 10422 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 10423 break; 10424 std::swap(LHS, RHS); 10425 } 10426 Opcode = X86ISD::FMIN; 10427 break; 10428 case ISD::SETOLE: 10429 // Converting this to a min would handle comparisons between positive 10430 // and negative zero incorrectly. 10431 if (!UnsafeFPMath && 10432 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 10433 break; 10434 Opcode = X86ISD::FMIN; 10435 break; 10436 case ISD::SETULE: 10437 // Converting this to a min would handle both negative zeros and NaNs 10438 // incorrectly, but we can swap the operands to fix both. 10439 std::swap(LHS, RHS); 10440 case ISD::SETOLT: 10441 case ISD::SETLT: 10442 case ISD::SETLE: 10443 Opcode = X86ISD::FMIN; 10444 break; 10445 10446 case ISD::SETOGE: 10447 // Converting this to a max would handle comparisons between positive 10448 // and negative zero incorrectly. 10449 if (!UnsafeFPMath && 10450 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(LHS)) 10451 break; 10452 Opcode = X86ISD::FMAX; 10453 break; 10454 case ISD::SETUGT: 10455 // Converting this to a max would handle NaNs incorrectly, and swapping 10456 // the operands would cause it to handle comparisons between positive 10457 // and negative zero incorrectly. 
10458 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 10459 if (!UnsafeFPMath && 10460 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 10461 break; 10462 std::swap(LHS, RHS); 10463 } 10464 Opcode = X86ISD::FMAX; 10465 break; 10466 case ISD::SETUGE: 10467 // Converting this to a max would handle both negative zeros and NaNs 10468 // incorrectly, but we can swap the operands to fix both. 10469 std::swap(LHS, RHS); 10470 case ISD::SETOGT: 10471 case ISD::SETGT: 10472 case ISD::SETGE: 10473 Opcode = X86ISD::FMAX; 10474 break; 10475 } 10476 // Check for x CC y ? y : x -- a min/max with reversed arms. 10477 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 10478 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 10479 switch (CC) { 10480 default: break; 10481 case ISD::SETOGE: 10482 // Converting this to a min would handle comparisons between positive 10483 // and negative zero incorrectly, and swapping the operands would 10484 // cause it to handle NaNs incorrectly. 10485 if (!UnsafeFPMath && 10486 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 10487 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 10488 break; 10489 std::swap(LHS, RHS); 10490 } 10491 Opcode = X86ISD::FMIN; 10492 break; 10493 case ISD::SETUGT: 10494 // Converting this to a min would handle NaNs incorrectly. 10495 if (!UnsafeFPMath && 10496 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 10497 break; 10498 Opcode = X86ISD::FMIN; 10499 break; 10500 case ISD::SETUGE: 10501 // Converting this to a min would handle both negative zeros and NaNs 10502 // incorrectly, but we can swap the operands to fix both. 10503 std::swap(LHS, RHS); 10504 case ISD::SETOGT: 10505 case ISD::SETGT: 10506 case ISD::SETGE: 10507 Opcode = X86ISD::FMIN; 10508 break; 10509 10510 case ISD::SETULT: 10511 // Converting this to a max would handle NaNs incorrectly. 
10512 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 10513 break; 10514 Opcode = X86ISD::FMAX; 10515 break; 10516 case ISD::SETOLE: 10517 // Converting this to a max would handle comparisons between positive 10518 // and negative zero incorrectly, and swapping the operands would 10519 // cause it to handle NaNs incorrectly. 10520 if (!UnsafeFPMath && 10521 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 10522 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 10523 break; 10524 std::swap(LHS, RHS); 10525 } 10526 Opcode = X86ISD::FMAX; 10527 break; 10528 case ISD::SETULE: 10529 // Converting this to a max would handle both negative zeros and NaNs 10530 // incorrectly, but we can swap the operands to fix both. 10531 std::swap(LHS, RHS); 10532 case ISD::SETOLT: 10533 case ISD::SETLT: 10534 case ISD::SETLE: 10535 Opcode = X86ISD::FMAX; 10536 break; 10537 } 10538 } 10539 10540 if (Opcode) 10541 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 10542 } 10543 10544 // If this is a select between two integer constants, try to do some 10545 // optimizations. 10546 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 10547 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 10548 // Don't do this for crazy integer types. 10549 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 10550 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 10551 // so that TrueC (the true value) is larger than FalseC. 10552 bool NeedsCondInvert = false; 10553 10554 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 10555 // Efficiently invertible. 10556 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 10557 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 10558 isa<ConstantSDNode>(Cond.getOperand(1))))) { 10559 NeedsCondInvert = true; 10560 std::swap(TrueC, FalseC); 10561 } 10562 10563 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 
10564 if (FalseC->getAPIntValue() == 0 && 10565 TrueC->getAPIntValue().isPowerOf2()) { 10566 if (NeedsCondInvert) // Invert the condition if needed. 10567 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 10568 DAG.getConstant(1, Cond.getValueType())); 10569 10570 // Zero extend the condition if needed. 10571 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 10572 10573 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 10574 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 10575 DAG.getConstant(ShAmt, MVT::i8)); 10576 } 10577 10578 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 10579 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 10580 if (NeedsCondInvert) // Invert the condition if needed. 10581 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 10582 DAG.getConstant(1, Cond.getValueType())); 10583 10584 // Zero extend the condition if needed. 10585 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 10586 FalseC->getValueType(0), Cond); 10587 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 10588 SDValue(FalseC, 0)); 10589 } 10590 10591 // Optimize cases that will turn into an LEA instruction. This requires 10592 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 
10593 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 10594 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 10595 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 10596 10597 bool isFastMultiplier = false; 10598 if (Diff < 10) { 10599 switch ((unsigned char)Diff) { 10600 default: break; 10601 case 1: // result = add base, cond 10602 case 2: // result = lea base( , cond*2) 10603 case 3: // result = lea base(cond, cond*2) 10604 case 4: // result = lea base( , cond*4) 10605 case 5: // result = lea base(cond, cond*4) 10606 case 8: // result = lea base( , cond*8) 10607 case 9: // result = lea base(cond, cond*8) 10608 isFastMultiplier = true; 10609 break; 10610 } 10611 } 10612 10613 if (isFastMultiplier) { 10614 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 10615 if (NeedsCondInvert) // Invert the condition if needed. 10616 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 10617 DAG.getConstant(1, Cond.getValueType())); 10618 10619 // Zero extend the condition if needed. 10620 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 10621 Cond); 10622 // Scale the condition by the difference. 10623 if (Diff != 1) 10624 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 10625 DAG.getConstant(Diff, Cond.getValueType())); 10626 10627 // Add the base if non-zero. 10628 if (FalseC->getAPIntValue() != 0) 10629 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 10630 SDValue(FalseC, 0)); 10631 return Cond; 10632 } 10633 } 10634 } 10635 } 10636 10637 return SDValue(); 10638} 10639 10640/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 10641static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 10642 TargetLowering::DAGCombinerInfo &DCI) { 10643 DebugLoc DL = N->getDebugLoc(); 10644 10645 // If the flag operand isn't dead, don't touch this CMOV. 
10646 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 10647 return SDValue(); 10648 10649 // If this is a select between two integer constants, try to do some 10650 // optimizations. Note that the operands are ordered the opposite of SELECT 10651 // operands. 10652 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 10653 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10654 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 10655 // larger than FalseC (the false value). 10656 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 10657 10658 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 10659 CC = X86::GetOppositeBranchCondition(CC); 10660 std::swap(TrueC, FalseC); 10661 } 10662 10663 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 10664 // This is efficient for any integer data type (including i8/i16) and 10665 // shift amount. 10666 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 10667 SDValue Cond = N->getOperand(3); 10668 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 10669 DAG.getConstant(CC, MVT::i8), Cond); 10670 10671 // Zero extend the condition if needed. 10672 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 10673 10674 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 10675 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 10676 DAG.getConstant(ShAmt, MVT::i8)); 10677 if (N->getNumValues() == 2) // Dead flag value? 10678 return DCI.CombineTo(N, Cond, SDValue()); 10679 return Cond; 10680 } 10681 10682 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 10683 // for any integer data type, including i8/i16. 
10684 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 10685 SDValue Cond = N->getOperand(3); 10686 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 10687 DAG.getConstant(CC, MVT::i8), Cond); 10688 10689 // Zero extend the condition if needed. 10690 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 10691 FalseC->getValueType(0), Cond); 10692 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 10693 SDValue(FalseC, 0)); 10694 10695 if (N->getNumValues() == 2) // Dead flag value? 10696 return DCI.CombineTo(N, Cond, SDValue()); 10697 return Cond; 10698 } 10699 10700 // Optimize cases that will turn into an LEA instruction. This requires 10701 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 10702 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 10703 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 10704 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 10705 10706 bool isFastMultiplier = false; 10707 if (Diff < 10) { 10708 switch ((unsigned char)Diff) { 10709 default: break; 10710 case 1: // result = add base, cond 10711 case 2: // result = lea base( , cond*2) 10712 case 3: // result = lea base(cond, cond*2) 10713 case 4: // result = lea base( , cond*4) 10714 case 5: // result = lea base(cond, cond*4) 10715 case 8: // result = lea base( , cond*8) 10716 case 9: // result = lea base(cond, cond*8) 10717 isFastMultiplier = true; 10718 break; 10719 } 10720 } 10721 10722 if (isFastMultiplier) { 10723 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 10724 SDValue Cond = N->getOperand(3); 10725 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 10726 DAG.getConstant(CC, MVT::i8), Cond); 10727 // Zero extend the condition if needed. 10728 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 10729 Cond); 10730 // Scale the condition by the difference. 
10731 if (Diff != 1) 10732 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 10733 DAG.getConstant(Diff, Cond.getValueType())); 10734 10735 // Add the base if non-zero. 10736 if (FalseC->getAPIntValue() != 0) 10737 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 10738 SDValue(FalseC, 0)); 10739 if (N->getNumValues() == 2) // Dead flag value? 10740 return DCI.CombineTo(N, Cond, SDValue()); 10741 return Cond; 10742 } 10743 } 10744 } 10745 } 10746 return SDValue(); 10747} 10748 10749 10750/// PerformMulCombine - Optimize a single multiply with constant into two 10751/// in order to implement it with two cheaper instructions, e.g. 10752/// LEA + SHL, LEA + LEA. 10753static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 10754 TargetLowering::DAGCombinerInfo &DCI) { 10755 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 10756 return SDValue(); 10757 10758 EVT VT = N->getValueType(0); 10759 if (VT != MVT::i64) 10760 return SDValue(); 10761 10762 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10763 if (!C) 10764 return SDValue(); 10765 uint64_t MulAmt = C->getZExtValue(); 10766 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 10767 return SDValue(); 10768 10769 uint64_t MulAmt1 = 0; 10770 uint64_t MulAmt2 = 0; 10771 if ((MulAmt % 9) == 0) { 10772 MulAmt1 = 9; 10773 MulAmt2 = MulAmt / 9; 10774 } else if ((MulAmt % 5) == 0) { 10775 MulAmt1 = 5; 10776 MulAmt2 = MulAmt / 5; 10777 } else if ((MulAmt % 3) == 0) { 10778 MulAmt1 = 3; 10779 MulAmt2 = MulAmt / 3; 10780 } 10781 if (MulAmt2 && 10782 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 10783 DebugLoc DL = N->getDebugLoc(); 10784 10785 if (isPowerOf2_64(MulAmt2) && 10786 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 10787 // If second multiplifer is pow2, issue it first. We want the multiply by 10788 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 10789 // is an add. 
10790 std::swap(MulAmt1, MulAmt2); 10791 10792 SDValue NewMul; 10793 if (isPowerOf2_64(MulAmt1)) 10794 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 10795 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 10796 else 10797 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 10798 DAG.getConstant(MulAmt1, VT)); 10799 10800 if (isPowerOf2_64(MulAmt2)) 10801 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 10802 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 10803 else 10804 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 10805 DAG.getConstant(MulAmt2, VT)); 10806 10807 // Do not add new nodes to DAG combiner worklist. 10808 DCI.CombineTo(N, NewMul, false); 10809 } 10810 return SDValue(); 10811} 10812 10813static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 10814 SDValue N0 = N->getOperand(0); 10815 SDValue N1 = N->getOperand(1); 10816 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 10817 EVT VT = N0.getValueType(); 10818 10819 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 10820 // since the result of setcc_c is all zero's or all ones. 10821 if (N1C && N0.getOpcode() == ISD::AND && 10822 N0.getOperand(1).getOpcode() == ISD::Constant) { 10823 SDValue N00 = N0.getOperand(0); 10824 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 10825 ((N00.getOpcode() == ISD::ANY_EXTEND || 10826 N00.getOpcode() == ISD::ZERO_EXTEND) && 10827 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 10828 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 10829 APInt ShAmt = N1C->getAPIntValue(); 10830 Mask = Mask.shl(ShAmt); 10831 if (Mask != 0) 10832 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 10833 N00, DAG.getConstant(Mask, VT)); 10834 } 10835 } 10836 10837 return SDValue(); 10838} 10839 10840/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 10841/// when possible. 
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  // Scalar integer SHL gets its own combine; this function handles vectors.
  if (!VT.isVector() && VT.isInteger() &&
      N->getOpcode() == ISD::SHL)
    return PerformSHLCombine(N, DAG);

  // On X86 with SSE2 support, we can transform this to a vector shift if
  // all elements are shifted by the same amount.  We can't do this in legalize
  // because a constant vector is typically transformed to a constant pool
  // so we have no knowledge of the shift amount.
  if (!Subtarget->hasSSE2())
    return SDValue();

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
    return SDValue();

  SDValue ShAmtOp = N->getOperand(1);
  EVT EltVT = VT.getVectorElementType();
  DebugLoc DL = N->getDebugLoc();
  SDValue BaseShAmt = SDValue();
  if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned i = 0;
    // Find the first non-undef element, then require all remaining
    // non-undef elements to match it.
    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      BaseShAmt = Arg;
      break;
    }
    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      if (Arg != BaseShAmt) {
        return SDValue();
      }
    }
  } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
             cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) {
    // A splat shuffle: dig into its source to find the splatted scalar.
    SDValue InVec = ShAmtOp.getOperand(0);
    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
      unsigned NumElts = InVec.getValueType().getVectorNumElements();
      unsigned i = 0;
      for (; i != NumElts; ++i) {
        SDValue Arg = InVec.getOperand(i);
        if (Arg.getOpcode() == ISD::UNDEF) continue;
        BaseShAmt = Arg;
        break;
      }
    } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      if (ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
        unsigned SplatIdx = cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex();
        // Only use the inserted element if it is the lane being splatted.
        if (C->getZExtValue() == SplatIdx)
          BaseShAmt = InVec.getOperand(1);
      }
    }
    if (BaseShAmt.getNode() == 0)
      // Couldn't identify the scalar statically; extract lane 0 at runtime.
      BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
                              DAG.getIntPtrConstant(0));
  } else
    return SDValue();

  // The shift amount is an i32.
  if (EltVT.bitsGT(MVT::i32))
    BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
  else if (EltVT.bitsLT(MVT::i32))
    BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt);

  // The shift amount is identical so we can do a vector shift.
  SDValue ValOp = N->getOperand(0);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unknown shift opcode!");
    break;
  case ISD::SHL:
    if (VT == MVT::v2i64)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  case ISD::SRA:
    // Note: no v2i64 case here — SSE2 has no psraq instruction.
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  case ISD::SRL:
    if (VT == MVT::v2i64)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  }
  return SDValue();
}

// PerformOrCombine - Recognize the double-shift idiom and form SHLD/SHRD.
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Canonicalize so N0 is the SHL and N1 is the SRL.
  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  // Look through truncates of the shift amounts.
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  DebugLoc DL = N->getDebugLoc();
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  // If the SHL amount is the (Bits - c) side, this is really a SHRD.
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    // Match the variable-amount form: the other shift amount must be
    // (Bits - ShAmt0).
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    // Match the constant-amount form: the two amounts must sum to Bits.
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
  bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps
    && Subtarget->hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load.  We currently handle two cases:  the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      // Remember the load's slot in the TokenFactor; the other operands are
      // collected so the TokenFactor can be rebuilt around the new chain.
      for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    DebugLoc LdDL = Ld->getDebugLoc();
    DebugLoc StDL = N->getDebugLoc();
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getPointerInfo(), Ld->isVolatile(),
                                  Ld->isNonTemporal(), Ld->getAlignment());
      SDValue NewChain = NewLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
                               Ops.size());
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
                          St->getPointerInfo(),
                          St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
                             Ops.size());
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }
  return SDValue();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  // Note: only +0.0 is treated as the identity (isPosZero, not isZero);
  // -0.0 is deliberately not matched.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  // In each case the +0.0 constant operand itself is returned as the result.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

/// PerformBTCombine - Do target-specific dag combines on X86ISD::BT nodes.
/// Since BT only looks at the low log2(BitWidth) bits of the bit-index
/// operand, try to simplify/shrink that operand via demanded-bits analysis.
static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    // Only the low Log2(BitWidth) bits of the index are demanded.
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}

/// PerformVZEXT_MOVLCombine - Do target-specific dag combines on
/// X86ISD::VZEXT_MOVL nodes: when the (possibly bitcast) source is an
/// X86ISD::VZEXT_LOAD whose vector elements have the same width as the
/// result's, replace the whole node with a plain bitcast of that load.
static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  // Look through a bitcast to find the underlying producer.
  if (Op.getOpcode() == ISD::BIT_CONVERT)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
  }
  return SDValue();
}

/// PerformZExtCombine - Do target-specific dag combines on ISD::ZERO_EXTEND
/// nodes.
static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
  // (i32 zext (and (i8  x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  DebugLoc dl = N->getDebugLoc();
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() != X86ISD::SETCC_CARRY)
      return SDValue();
    // The AND mask must be exactly 1 for the transform to be valid.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C || C->getZExtValue() != 1)
      return SDValue();
    // Re-materialize the SETCC_CARRY directly at the wider type.
    return DAG.getNode(ISD::AND, dl, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                   N00.getOperand(0), N00.getOperand(1)),
                       DAG.getConstant(1, VT));
  }

  return SDValue();
}

/// PerformDAGCombine - Central dispatch for X86 target-specific DAG combines,
/// keyed on the node's opcode.
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG);
  case X86ISD::SHUFPS:      // Handle all target specific shuffles
  case X86ISD::SHUFPD:
  case X86ISD::PALIGN:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  // All types other than i16 are fine as-is; only i16 is selectively
  // discouraged for the opcodes below.
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  // Promotion is only considered for i16 operations (see
  // isTypeDesirableForOp above); everything else is left alone.
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then it
    // might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is live-out.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)): promoting here would break the
    // load-op-store folding opportunity.
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough -- commutable ops share the operand checks with SUB below.
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    // SUB is not commutable, so a foldable load on the RHS blocks promotion.
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  // i16 ops that survive the checks above are promoted to i32.
  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// LowerToBSwap - Replace a single-argument integer inline-asm call (already
/// recognized by the caller as a byte-swap idiom) with a call to the
/// llvm.bswap intrinsic of the same width.  Returns true on success.
static bool LowerToBSwap(CallInst *CI) {
  // FIXME: this should verify that we are targeting a 486 or better. If not,
  // we will turn this bswap into something that will be lowered to logical ops
  // instead of emitting the bswap asm. For now, we don't support 486 or lower
  // so don't worry about this.

  // Verify this is a simple bswap: one argument, result type equal to the
  // argument type, and an integer type.
  if (CI->getNumArgOperands() != 1 ||
      CI->getType() != CI->getArgOperand(0)->getType() ||
      !CI->getType()->isIntegerTy())
    return false;

  // llvm.bswap is only defined for whole-byte-pair widths.
  const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // Okay, we can do this xform, do so now.
  const Type *Tys[] = { Ty };
  Module *M = CI->getParent()->getParent()->getParent();
  Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);

  Value *Op = CI->getArgOperand(0);
  Op = CallInst::Create(Int, Op, CI->getName(), CI);

  CI->replaceAllUsesWith(Op);
  CI->eraseFromParent();
  return true;
}

/// ExpandInlineAsm - Recognize inline-asm byte-swap idioms ("bswap",
/// "rorw/rolw $$8", and the 32-bit "bswap/bswap/xchgl" i64 sequence) and
/// replace the asm call with an llvm.bswap intrinsic via LowerToBSwap.
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();

  std::string AsmStr = IA->getAsmString();

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t");  // Split with whitespace.

    // bswap $0
    if (AsmPieces.size() == 2 &&
        (AsmPieces[0] == "bswap" ||
         AsmPieces[0] == "bswapq" ||
         AsmPieces[0] == "bswapl") &&
        (AsmPieces[1] == "$0" ||
         AsmPieces[1] == "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return LowerToBSwap(CI);
    }
    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        AsmPieces.size() == 3 &&
        (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") &&
        AsmPieces[1] == "$$8," &&
        AsmPieces[2] == "${0:w}" &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
      // Verify the clobber list is exactly the standard x86 asm clobbers
      // (sorted): ~{cc}, ~{dirflag}, ~{flags}, ~{fpsr}.
      AsmPieces.clear();
      const std::string &Constraints = IA->getConstraintString();
      SplitString(StringRef(Constraints).substr(5), AsmPieces, ",");
      std::sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}") {
        return LowerToBSwap(CI);
      }
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(64) &&
        Constraints.size() >= 2 &&
        Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
        Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
      // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
      SmallVector<StringRef, 4> Words;
      SplitString(AsmPieces[0], Words, " \t");
      if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
        Words.clear();
        SplitString(AsmPieces[1], Words, " \t");
        if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
          Words.clear();
          SplitString(AsmPieces[2], Words, " \t,");
          if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
              Words[2] == "%edx") {
            return LowerToBSwap(CI);
          }
        }
      }
    }
    break;
  }
  return false;
}



/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // Register-class constraints (any register in a class is acceptable).
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    // Specific-register constraints (a/b/c/d = eax/ebx/ecx/edx, S/D = esi/edi,
    // A = eax:edx pair).
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    // Immediate-style constraints handled in LowerAsmOperandForConstraint.
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  // Anything else is handled by the target-independent implementation.
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  const Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    // NOTE(review): there is no 'break' here, so the default case falls
    // through into the register cases below and the base weight may be
    // upgraded to CW_SpecificReg for integer operands -- confirm this
    // fallthrough is intentional.
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':  // x87 FP stack registers.
  case 't':
  case 'u':
      if (type->isFloatingPointTy())
        weight = CW_SpecificReg;
      break;
  case 'y':  // MMX register, only if MMX is enabled.
      if (type->isX86_MMXTy() && !DisableMMX && Subtarget->hasMMX())
        weight = CW_SpecificReg;
      break;
  case 'x':  // SSE registers: only weighted for 128-bit operands with SSE1+.
  case 'Y':
    if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1())
      weight = CW_Register;
    break;
  case 'I':  // Integer constant in [0, 31].
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':  // Integer constant in [0, 63].
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':  // Signed 8-bit integer constant.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':  // 0xff or 0xffff.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':  // Integer constant in [0, 3].
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':  // Unsigned 8-bit integer constant.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':  // Floating-point constant.
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':  // Signed 32-bit integer constant.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':  // Unsigned 32-bit integer constant.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':  // Constant in [0, 31].
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':  // Constant in [0, 63].
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':  // Constant that fits in a signed 8-bit immediate.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':  // Constant in [0, 255].
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc., accumulating the constant
    // displacements into Offset while walking down to the GlobalAddress.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  // Fall back to the target-independent handling (e.g. for memory operands).
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

/// getRegClassForInlineAsmConstraint - Return the set of registers that may
/// satisfy the given single-letter register-class constraint for the given
/// value type.  The lists are terminated with a 0 sentinel (make_vector).
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32)
          return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                       X86::ESI, X86::EDI, X86::R8D, X86::R9D,
                                       X86::R10D,X86::R11D,X86::R12D,
                                       X86::R13D,X86::R14D,X86::R15D,
                                       X86::EBP, X86::ESP, 0);
        else if (VT == MVT::i16)
          return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                       X86::SI, X86::DI, X86::R8W,X86::R9W,
                                       X86::R10W,X86::R11W,X86::R12W,
                                       X86::R13W,X86::R14W,X86::R15W,
                                       X86::BP, X86::SP, 0);
        else if (VT == MVT::i8)
          return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL,
                                       X86::SIL, X86::DIL, X86::R8B,X86::R9B,
                                       X86::R10B,X86::R11B,X86::R12B,
                                       X86::R13B,X86::R14B,X86::R15B,
                                       X86::BPL, X86::SPL, 0);

        else if (VT == MVT::i64)
          return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
                                       X86::RSI, X86::RDI, X86::R8, X86::R9,
                                       X86::R10, X86::R11, X86::R12,
                                       X86::R13, X86::R14, X86::R15,
                                       X86::RBP, X86::RSP, 0);

        break;
      }
      // 32-bit fallthrough: in 32-bit mode 'q' means the same as 'Q'.
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  // No explicit register list for this constraint/type combination.
  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'R':   // LEGACY_REGS (no REX-prefix registers)
      if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
      return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
    case 'f':  // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map "{st(0)}" .. "{st(7)}" -> ST0 .. ST7.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      // NOTE(review): assumes X86::ST0..ST7 are consecutive values in the
      // generated register enum -- confirm against X86RegisterInfo.td output.
      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = X86::CCRRegisterClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GR32_ADRegisterClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}