X86ISelLowering.cpp revision 0488fb649a56b7fc89a5814df5308813f9e5a85d
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86ShuffleDecode.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
    if (is64Bit) return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  } else if (TM.getSubtarget<X86Subtarget>().isTargetELF()) {
    if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
    return new X8632_ELFTargetObjectFile(TM);
  } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
    return new TargetLoweringObjectFileCOFF();
  }
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
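  // (SETcc instructions produce their result in an 8-bit register, and
  // variable shift counts must live in CL, hence the i8 choices below.)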
  setShiftAmountType(MVT::i8);
  setBooleanContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  } else if (!UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
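  // (On x86-32, i64 is not a legal type, so these conversions presumably go
  // through a stack temporary using the x87 FILD/FIST instructions.)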
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
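  // (A single DIV/IDIV yields both the quotient and the remainder, and a
  // single MUL/IMUL yields both the low and high halves of the product.)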
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
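  // (The custom lowering builds X86ISD::CMOV nodes that carry an explicit
  // condition code and EFLAGS operand.)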
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  // We may not have a libcall for MEMBARRIER so we should lower this.
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
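  // (A LOCK-prefixed instruction such as LOCK XADD is itself a full barrier,
  // so MFENCEs immediately before or after it are redundant.)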
  setShouldFoldAtomicFences(true);

  // Expand certain atomics
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (!UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else if (!UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
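  // (There is no SSE register class for f80, so it stays on the x87 stack
  // even when SSE is available.)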
  if (!UseSoftFloat) {
    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      bool ignored;
      APFloat TmpFlt(+0.0);
      TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                     &ignored);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
    }
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass, false);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Expand);

  if (!UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      EVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                         VT.getSimpleVT().SimpleTy, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
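    // (Bitwise operations and full-width loads are insensitive to the element
    // type, so running them as v2i64 and bitcasting back is always safe.)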
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, SVT, Promote);
      AddPromotedToType(ISD::AND, SVT, MVT::v2i64);
      setOperationAction(ISD::OR, SVT, Promote);
      AddPromotedToType(ISD::OR, SVT, MVT::v2i64);
      setOperationAction(ISD::XOR, SVT, Promote);
      AddPromotedToType(ISD::XOR, SVT, MVT::v2i64);
      setOperationAction(ISD::LOAD, SVT, Promote);
      AddPromotedToType(ISD::LOAD, SVT, MVT::v2i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType(ISD::SELECT, SVT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // Can turn SHL into an integer multiply.
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
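    // (For example, PINSRW takes a 32-bit register operand but only a 16-bit
    // memory operand; SSE4.1's PINSRB likewise pairs r32 with m8.)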
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  if (Subtarget->hasSSE42()) {
    setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v8i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom);
    //setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Custom);
    //setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom);
    //setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
    //setOperationAction(ISD::VSETCC, MVT::v8f32, Custom);

    // Operations to consider commented out: v16i16, v32i8
    //setOperationAction(ISD::ADD, MVT::v16i16, Legal);
    setOperationAction(ISD::ADD, MVT::v8i32, Custom);
    setOperationAction(ISD::ADD, MVT::v4i64, Custom);
    //setOperationAction(ISD::SUB, MVT::v32i8, Legal);
    //setOperationAction(ISD::SUB, MVT::v16i16, Legal);
    setOperationAction(ISD::SUB, MVT::v8i32, Custom);
    setOperationAction(ISD::SUB, MVT::v4i64, Custom);
    //setOperationAction(ISD::MUL, MVT::v16i16, Legal);
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v4f64, Custom);
    // setOperationAction(ISD::VSETCC, MVT::v32i8, Custom);
    // setOperationAction(ISD::VSETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i32, Custom);

    // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i8, Custom);
    // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i16, Custom);
    // setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Custom);

#if 0
    // Not sure we want to do this since there are no 256-bit integer
    // operations in AVX

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    // This includes 256-bit vectors
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; ++i) {
      EVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i64, Custom);
    }
#endif

#if 0
    // Not sure we want to do this since there are no 256-bit integer
    // operations in AVX

    // Promote v32i8, v16i16, v8i32 load, select, and, or, xor to v4i64.
    // Including 256-bit vectors
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; i++) {
      EVT VT = (MVT::SimpleValueType)i;

      if (!VT.is256BitVector()) {
        continue;
      }
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
#endif
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Add/Sub/Mul with overflow operations are custom lowered.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);

  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SADDO, MVT::i64, Custom);
    setOperationAction(ISD::UADDO, MVT::i64, Custom);
    setOperationAction(ISD::SSUBO, MVT::i64, Custom);
    setOperationAction(ISD::USUBO, MVT::i64, Custom);
    setOperationAction(ISD::SMULO, MVT::i64, Custom);
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
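    // (Clearing a libcall name tells the legalizer that no runtime routine
    // exists, so these i128 shifts are expanded inline instead.)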
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For @llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 8;   // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 3;  // For @llvm.memmove -> sequence of stores
  setPrefLoopAlignment(16);
  benefitFromCodePlacementOpt = true;
}


MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, that means the destination alignment can
/// satisfy any constraint. Similarly, if SrcAlign is zero it means there
/// isn't a need to check it against the alignment requirement, probably
/// because the source does not need to be loaded. If 'NonScalarIntSafe' is
/// true, that means it's safe to return a non-scalar-integer type, e.g. empty
/// string source, constant, or loaded from memory. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool NonScalarIntSafe,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux.  This is because the stack realignment code can't handle certain
  // cases like PR2962.  This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (NonScalarIntSafe &&
      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

/// getPICBaseSymbol - Return the X86-32 PIC base.
MCSymbol *
X86TargetLowering::getPICBaseSymbol(const MachineFunction *MF,
                                    MCContext &Ctx) const {
  const MCAsmInfo &MAI = *getTargetMachine().getMCAsmInfo();
  return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
                               Twine(MF->getFunctionNumber())+"$pb");
}


const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::Create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget->is64Bit())
    // This doesn't have DebugLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
  return Table;
}

/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(getPICBaseSymbol(MF, Ctx), Ctx);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
  return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}

std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = (Subtarget->is64Bit()
           ? X86::GR64RegisterClass : X86::GR32RegisterClass);
    break;
  case MVT::x86mmx:
    RRC = X86::VR64RegisterClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = X86::VR128RegisterClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

unsigned
X86TargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  unsigned FPDiff = RegInfo->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 8 - FPDiff;
  case X86::VR128RegClassID:
    return Subtarget->is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
    Offset = 0x28;
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
      AddressSpace = 256;
    else
      AddressSpace = 257;
  } else {
    // %gs:0x14 on i386
    Offset = 0x14;
    AddressSpace = 256;
  }
  return true;
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  // Add the regs to the liveout set for the function.
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  for (unsigned i = 0; i != RVLocs.size(); ++i)
    if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg()))
      MRI.addLiveOut(RVLocs[i].getLocReg());

  SDValue Flag;

  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                         MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }
    // Likewise we can't return F64 values with SSE1 only.  gcc does so, but
    // llvm-gcc has never done it right and no one has noticed, so this
    // should be OK for now.
    if (ValVT == MVT::f64 &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
      report_fatal_error("SSE2 register return with SSE2 disabled");

    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
    // the RET instruction and handled by the FP Stackifier.
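    // (The FP stackifier pass later turns these ST0/ST1 operands into real
    // x87 stack pushes, keeping the FP stack balanced across the return.)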
    if (VA.getLocReg() == X86::ST0 ||
        VA.getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget->is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget->hasSSE2())
            ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, ValToCopy);
        }
      }
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax.
  if (Subtarget->is64Bit() &&
      DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments().");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());

    Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX now acts like a return value.
    MRI.addLiveOut(X86::RAX);
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, dl,
                     MVT::Other, &RetOps[0], RetOps.size());
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget->is64Bit();
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
1355 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1356 CCValAssign &VA = RVLocs[i]; 1357 EVT CopyVT = VA.getValVT(); 1358 1359 // If this is x86-64, and we disabled SSE, we can't return FP values 1360 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1361 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1362 report_fatal_error("SSE register return with SSE disabled"); 1363 } 1364 1365 SDValue Val; 1366 1367 // If this is a call to a function that returns an fp value on the floating 1368 // point stack, we must guarantee that the value is popped from the stack, so 1369 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1370 // if the return value is not used. We use the FpGET_ST0 instructions 1371 // instead. 1372 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1373 // If we prefer to use the value in xmm registers, copy it out as f80 and 1374 // use a truncate to move it from fp stack reg to xmm reg. 1375 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1376 bool isST0 = VA.getLocReg() == X86::ST0; 1377 unsigned Opc = 0; 1378 if (CopyVT == MVT::f32) Opc = isST0 ? X86::FpGET_ST0_32:X86::FpGET_ST1_32; 1379 if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64; 1380 if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80; 1381 SDValue Ops[] = { Chain, InFlag }; 1382 Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag, 1383 Ops, 2), 1); 1384 Val = Chain.getValue(0); 1385 1386 // Round the f80 to the right size, which also moves it to the appropriate 1387 // xmm register. 1388 if (CopyVT != VA.getValVT()) 1389 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1390 // This truncation won't change the value. 1391 DAG.getIntPtrConstant(1)); 1392 } else if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) { 1393 // For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64. 1394 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1395 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1396 MVT::v2i64, InFlag).getValue(1); 1397 Val = Chain.getValue(0); 1398 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, 1399 Val, DAG.getConstant(0, MVT::i64)); 1400 } else { 1401 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1402 MVT::i64, InFlag).getValue(1); 1403 Val = Chain.getValue(0); 1404 } 1405 Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val); 1406 } else { 1407 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1408 CopyVT, InFlag).getValue(1); 1409 Val = Chain.getValue(0); 1410 } 1411 InFlag = Chain.getValue(2); 1412 InVals.push_back(Val); 1413 } 1414 1415 return Chain; 1416} 1417 1418 1419//===----------------------------------------------------------------------===// 1420// C & StdCall & Fast Calling Convention implementation 1421//===----------------------------------------------------------------------===// 1422// The StdCall calling convention is the standard for many Windows API 1423// routines. It differs from the C calling convention only slightly: the 1424// callee, not the caller, cleans up the stack, and symbols are decorated in 1425// a particular way. It doesn't support any vector arguments. 1426// For info on the fast calling convention see the Fast Calling Convention 1427// (tail call) implementation LowerX86_32FastCCCallTo. 1428 1429/// CallIsStructReturn - Determines whether a call uses struct return 1430/// semantics.
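/// For example, a call lowered from IR like (illustrative only):
///   call void @g(%struct.S* sret %tmp)
/// carries the sret flag on its first outgoing argument, which is exactly
/// what is tested here.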
1431 static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1432 if (Outs.empty()) 1433 return false; 1434 1435 return Outs[0].Flags.isSRet(); 1436} 1437 1438/// ArgsAreStructReturn - Determines whether a function uses struct 1439/// return semantics. 1440static bool 1441ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1442 if (Ins.empty()) 1443 return false; 1444 1445 return Ins[0].Flags.isSRet(); 1446} 1447 1448/// CCAssignFnForNode - Selects the correct CCAssignFn for the 1449/// given CallingConvention value. 1450CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const { 1451 if (Subtarget->is64Bit()) { 1452 if (CC == CallingConv::GHC) 1453 return CC_X86_64_GHC; 1454 else if (Subtarget->isTargetWin64()) 1455 return CC_X86_Win64_C; 1456 else 1457 return CC_X86_64_C; 1458 } 1459 1460 if (CC == CallingConv::X86_FastCall) 1461 return CC_X86_32_FastCall; 1462 else if (CC == CallingConv::X86_ThisCall) 1463 return CC_X86_32_ThisCall; 1464 else if (CC == CallingConv::Fast) 1465 return CC_X86_32_FastCC; 1466 else if (CC == CallingConv::GHC) 1467 return CC_X86_32_GHC; 1468 else 1469 return CC_X86_32_C; 1470} 1471 1472/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address 1473/// specified by "Src" to the address "Dst" with size and alignment information 1474/// specified by the specific parameter attribute. The copy will be passed as 1475/// a byval function parameter. 1476static SDValue 1477CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1478 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1479 DebugLoc dl) { 1480 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1481 1482 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1483 /*isVolatile*/false, /*AlwaysInline=*/true, 1484 MachinePointerInfo(), MachinePointerInfo()); 1485} 1486 1487/// IsTailCallConvention - Return true if the calling convention is one that 1488/// supports tail call optimization. 1489static bool IsTailCallConvention(CallingConv::ID CC) { 1490 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1491} 1492 1493/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1494/// a tailcall target by changing its ABI. 1495static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) { 1496 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1497} 1498 1499SDValue 1500X86TargetLowering::LowerMemArgument(SDValue Chain, 1501 CallingConv::ID CallConv, 1502 const SmallVectorImpl<ISD::InputArg> &Ins, 1503 DebugLoc dl, SelectionDAG &DAG, 1504 const CCValAssign &VA, 1505 MachineFrameInfo *MFI, 1506 unsigned i) const { 1507 // Create the nodes corresponding to a load from this parameter slot. 1508 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1509 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv); 1510 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1511 EVT ValVT; 1512 1513 // If the value is passed by pointer, the address is passed instead of the 1514 // value itself. 1515 if (VA.getLocInfo() == CCValAssign::Indirect) 1516 ValVT = VA.getLocVT(); 1517 else 1518 ValVT = VA.getValVT(); 1519 1520 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1521 // changed with more analysis. 1522 // In the case of tail call optimization, mark all arguments mutable, since 1523 // they could be overwritten by the lowering of arguments for a tail call.
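  // Illustrative example with made-up offsets: a 12-byte byval argument at
  // stack offset 8 becomes a fixed frame object whose frame index is itself
  // the argument value, whereas a plain i32 at offset 4 becomes a load from
  // its fixed stack slot.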
1524 if (Flags.isByVal()) { 1525 int FI = MFI->CreateFixedObject(Flags.getByValSize(), 1526 VA.getLocMemOffset(), isImmutable); 1527 return DAG.getFrameIndex(FI, getPointerTy()); 1528 } else { 1529 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1530 VA.getLocMemOffset(), isImmutable); 1531 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1532 return DAG.getLoad(ValVT, dl, Chain, FIN, 1533 MachinePointerInfo::getFixedStack(FI), 1534 false, false, 0); 1535 } 1536} 1537 1538SDValue 1539X86TargetLowering::LowerFormalArguments(SDValue Chain, 1540 CallingConv::ID CallConv, 1541 bool isVarArg, 1542 const SmallVectorImpl<ISD::InputArg> &Ins, 1543 DebugLoc dl, 1544 SelectionDAG &DAG, 1545 SmallVectorImpl<SDValue> &InVals) 1546 const { 1547 MachineFunction &MF = DAG.getMachineFunction(); 1548 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1549 1550 const Function* Fn = MF.getFunction(); 1551 if (Fn->hasExternalLinkage() && 1552 Subtarget->isTargetCygMing() && 1553 Fn->getName() == "main") 1554 FuncInfo->setForceFramePointer(true); 1555 1556 MachineFrameInfo *MFI = MF.getFrameInfo(); 1557 bool Is64Bit = Subtarget->is64Bit(); 1558 bool IsWin64 = Subtarget->isTargetWin64(); 1559 1560 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1561 "Var args not supported with calling convention fastcc or ghc"); 1562 1563 // Assign locations to all of the incoming arguments. 1564 SmallVector<CCValAssign, 16> ArgLocs; 1565 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 1566 ArgLocs, *DAG.getContext()); 1567 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv)); 1568 1569 unsigned LastVal = ~0U; 1570 SDValue ArgValue; 1571 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1572 CCValAssign &VA = ArgLocs[i]; 1573 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1574 // places. 1575 assert(VA.getValNo() != LastVal && 1576 "Don't support value assigned to multiple locs yet"); 1577 LastVal = VA.getValNo(); 1578 1579 if (VA.isRegLoc()) { 1580 EVT RegVT = VA.getLocVT(); 1581 TargetRegisterClass *RC = NULL; 1582 if (RegVT == MVT::i32) 1583 RC = X86::GR32RegisterClass; 1584 else if (Is64Bit && RegVT == MVT::i64) 1585 RC = X86::GR64RegisterClass; 1586 else if (RegVT == MVT::f32) 1587 RC = X86::FR32RegisterClass; 1588 else if (RegVT == MVT::f64) 1589 RC = X86::FR64RegisterClass; 1590 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1591 RC = X86::VR256RegisterClass; 1592 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1593 RC = X86::VR128RegisterClass; 1594 else if (RegVT == MVT::x86mmx) 1595 RC = X86::VR64RegisterClass; 1596 else 1597 llvm_unreachable("Unknown argument type!"); 1598 1599 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1600 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1601 1602 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1603 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1604 // right size. 1605 if (VA.getLocInfo() == CCValAssign::SExt) 1606 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1607 DAG.getValueType(VA.getValVT())); 1608 else if (VA.getLocInfo() == CCValAssign::ZExt) 1609 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1610 DAG.getValueType(VA.getValVT())); 1611 else if (VA.getLocInfo() == CCValAssign::BCvt) 1612 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); 1613 1614 if (VA.isExtInLoc()) { 1615 // Handle MMX values passed in XMM regs. 
1616 if (RegVT.isVector()) { 1617 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1618 ArgValue); 1619 } else 1620 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1621 } 1622 } else { 1623 assert(VA.isMemLoc()); 1624 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1625 } 1626 1627 // If the value is passed via a pointer, do a load. 1628 if (VA.getLocInfo() == CCValAssign::Indirect) 1629 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1630 MachinePointerInfo(), false, false, 0); 1631 1632 InVals.push_back(ArgValue); 1633 } 1634 1635 // The x86-64 ABI for returning structs by value requires that we copy 1636 // the sret argument into %rax for the return. Save the argument into 1637 // a virtual register so that we can access it from the return points. 1638 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1639 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1640 unsigned Reg = FuncInfo->getSRetReturnReg(); 1641 if (!Reg) { 1642 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1643 FuncInfo->setSRetReturnReg(Reg); 1644 } 1645 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1646 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1647 } 1648 1649 unsigned StackSize = CCInfo.getNextStackOffset(); 1650 // Align stack specially for tail calls. 1651 if (FuncIsMadeTailCallSafe(CallConv)) 1652 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1653 1654 // If the function takes a variable number of arguments, make a frame index 1655 // for the start of the first vararg value... for expansion of llvm.va_start. 1656 if (isVarArg) { 1657 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1658 CallConv != CallingConv::X86_ThisCall)) { 1659 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1660 } 1661 if (Is64Bit) { 1662 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1663 1664 // FIXME: We should really autogenerate these arrays 1665 static const unsigned GPR64ArgRegsWin64[] = { 1666 X86::RCX, X86::RDX, X86::R8, X86::R9 1667 }; 1668 static const unsigned XMMArgRegsWin64[] = { 1669 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 1670 }; 1671 static const unsigned GPR64ArgRegs64Bit[] = { 1672 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1673 }; 1674 static const unsigned XMMArgRegs64Bit[] = { 1675 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1676 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1677 }; 1678 const unsigned *GPR64ArgRegs, *XMMArgRegs; 1679 1680 if (IsWin64) { 1681 TotalNumIntRegs = 4; TotalNumXMMRegs = 4; 1682 GPR64ArgRegs = GPR64ArgRegsWin64; 1683 XMMArgRegs = XMMArgRegsWin64; 1684 } else { 1685 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1686 GPR64ArgRegs = GPR64ArgRegs64Bit; 1687 XMMArgRegs = XMMArgRegs64Bit; 1688 } 1689 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 1690 TotalNumIntRegs); 1691 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 1692 TotalNumXMMRegs); 1693 1694 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 1695 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 1696 "SSE register cannot be used when SSE is disabled!"); 1697 assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) && 1698 "SSE register cannot be used when SSE is disabled!"); 1699 if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1()) 1700 // Kernel mode asks for SSE to be disabled, so don't push them 1701 // on the stack.
1702 TotalNumXMMRegs = 0; 1703 1704 // For X86-64, if there are vararg parameters that are passed via 1705 // registers, then we must store them to their spots on the stack so they 1706 // may be loaded by dereferencing the result of va_arg. 1707 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 1708 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 1709 FuncInfo->setRegSaveFrameIndex( 1710 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 1711 false)); 1712 1713 // Store the integer parameter registers. 1714 SmallVector<SDValue, 8> MemOps; 1715 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 1716 getPointerTy()); 1717 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 1718 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 1719 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 1720 DAG.getIntPtrConstant(Offset)); 1721 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 1722 X86::GR64RegisterClass); 1723 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1724 SDValue Store = 1725 DAG.getStore(Val.getValue(1), dl, Val, FIN, 1726 MachinePointerInfo::getFixedStack( 1727 FuncInfo->getRegSaveFrameIndex(), Offset), 1728 false, false, 0); 1729 MemOps.push_back(Store); 1730 Offset += 8; 1731 } 1732 1733 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 1734 // Now store the XMM (fp + vector) parameter registers. 1735 SmallVector<SDValue, 11> SaveXMMOps; 1736 SaveXMMOps.push_back(Chain); 1737 1738 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 1739 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 1740 SaveXMMOps.push_back(ALVal); 1741 1742 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1743 FuncInfo->getRegSaveFrameIndex())); 1744 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1745 FuncInfo->getVarArgsFPOffset())); 1746 1747 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 1748 unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs], 1749 X86::VR128RegisterClass); 1750 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 1751 SaveXMMOps.push_back(Val); 1752 } 1753 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 1754 MVT::Other, 1755 &SaveXMMOps[0], SaveXMMOps.size())); 1756 } 1757 1758 if (!MemOps.empty()) 1759 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1760 &MemOps[0], MemOps.size()); 1761 } 1762 } 1763 1764 // Some CCs need callee pop. 1765 if (Subtarget->IsCalleePop(isVarArg, CallConv)) { 1766 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 1767 } else { 1768 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 1769 // If this is an sret function, the return should pop the hidden pointer. 1770 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins)) 1771 FuncInfo->setBytesToPopOnReturn(4); 1772 } 1773 1774 if (!Is64Bit) { 1775 // RegSaveFrameIndex is X86-64 only. 1776 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 1777 if (CallConv == CallingConv::X86_FastCall || 1778 CallConv == CallingConv::X86_ThisCall) 1779 // fastcc functions can't have varargs. 1780 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 1781 } 1782 1783 return Chain; 1784} 1785 1786SDValue 1787X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 1788 SDValue StackPtr, SDValue Arg, 1789 DebugLoc dl, SelectionDAG &DAG, 1790 const CCValAssign &VA, 1791 ISD::ArgFlagsTy Flags) const { 1792 const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ?
32 : 0); 1793 unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset(); 1794 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1795 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1796 if (Flags.isByVal()) 1797 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 1798 1799 return DAG.getStore(Chain, dl, Arg, PtrOff, 1800 MachinePointerInfo::getStack(LocMemOffset), 1801 false, false, 0); 1802} 1803 1804/// EmitTailCallLoadRetAddr - Emit a load of the return address if tail call 1805/// optimization is performed and it is required. 1806SDValue 1807X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 1808 SDValue &OutRetAddr, SDValue Chain, 1809 bool IsTailCall, bool Is64Bit, 1810 int FPDiff, DebugLoc dl) const { 1811 // Adjust the Return address stack slot. 1812 EVT VT = getPointerTy(); 1813 OutRetAddr = getReturnAddressFrameIndex(DAG); 1814 1815 // Load the "old" Return address. 1816 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 1817 false, false, 0); 1818 return SDValue(OutRetAddr.getNode(), 1); 1819} 1820 1821/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 1822/// optimization is performed and it is required (FPDiff!=0). 1823static SDValue 1824EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 1825 SDValue Chain, SDValue RetAddrFrIdx, 1826 bool Is64Bit, int FPDiff, DebugLoc dl) { 1827 // Store the return address to the appropriate stack slot. 1828 if (!FPDiff) return Chain; 1829 // Calculate the new stack slot for the return address. 1830 int SlotSize = Is64Bit ? 8 : 4; 1831 int NewReturnAddrFI = 1832 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 1833 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 1834 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1835 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 1836 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 1837 false, false, 0); 1838 return Chain; 1839} 1840 1841SDValue 1842X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1843 CallingConv::ID CallConv, bool isVarArg, 1844 bool &isTailCall, 1845 const SmallVectorImpl<ISD::OutputArg> &Outs, 1846 const SmallVectorImpl<SDValue> &OutVals, 1847 const SmallVectorImpl<ISD::InputArg> &Ins, 1848 DebugLoc dl, SelectionDAG &DAG, 1849 SmallVectorImpl<SDValue> &InVals) const { 1850 MachineFunction &MF = DAG.getMachineFunction(); 1851 bool Is64Bit = Subtarget->is64Bit(); 1852 bool IsStructRet = CallIsStructReturn(Outs); 1853 bool IsSibcall = false; 1854 1855 if (isTailCall) { 1856 // Check if it's really possible to do a tail call. 1857 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1858 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1859 Outs, OutVals, Ins, DAG); 1860 1861 // Sibcalls are automatically detected tailcalls which do not require 1862 // ABI changes. 1863 if (!GuaranteedTailCallOpt && isTailCall) 1864 IsSibcall = true; 1865 1866 if (isTailCall) 1867 ++NumTailCalls; 1868 } 1869 1870 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1871 "Var args not supported with calling convention fastcc or ghc"); 1872 1873 // Analyze operands of the call, assigning locations to each operand. 1874 SmallVector<CCValAssign, 16> ArgLocs; 1875 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 1876 ArgLocs, *DAG.getContext()); 1877 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv)); 1878 1879 // Get a count of how many bytes are to be pushed on the stack.
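  // Worked example with made-up numbers: if the caller was entered with 16
  // bytes of stack arguments (BytesToPopOnReturn == 16) and this tail call
  // needs only 8, then FPDiff below is 16 - 8 = 8 and the return address
  // slot has to be moved by that amount.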
1880 unsigned NumBytes = CCInfo.getNextStackOffset(); 1881 if (IsSibcall) 1882 // This is a sibcall. The memory operands are already available in the 1883 // caller's own stack. 1884 NumBytes = 0; 1885 else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv)) 1886 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1887 1888 int FPDiff = 0; 1889 if (isTailCall && !IsSibcall) { 1890 // Lower arguments at fp - stackoffset + fpdiff. 1891 unsigned NumBytesCallerPushed = 1892 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1893 FPDiff = NumBytesCallerPushed - NumBytes; 1894 1895 // Set the delta of movement of the returnaddr stackslot, but only if the 1896 // new delta is smaller than the previously recorded one (more movement). 1897 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1898 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1899 } 1900 1901 if (!IsSibcall) 1902 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1903 1904 SDValue RetAddrFrIdx; 1905 // Load the return address for tail calls. 1906 if (isTailCall && FPDiff) 1907 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 1908 Is64Bit, FPDiff, dl); 1909 1910 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 1911 SmallVector<SDValue, 8> MemOpChains; 1912 SDValue StackPtr; 1913 1914 // Walk the register/memloc assignments, inserting copies/loads. In the case 1915 // of tail call optimization, arguments are handled later. 1916 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1917 CCValAssign &VA = ArgLocs[i]; 1918 EVT RegVT = VA.getLocVT(); 1919 SDValue Arg = OutVals[i]; 1920 ISD::ArgFlagsTy Flags = Outs[i].Flags; 1921 bool isByVal = Flags.isByVal(); 1922 1923 // Promote the value if needed. 1924 switch (VA.getLocInfo()) { 1925 default: llvm_unreachable("Unknown loc info!"); 1926 case CCValAssign::Full: break; 1927 case CCValAssign::SExt: 1928 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 1929 break; 1930 case CCValAssign::ZExt: 1931 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 1932 break; 1933 case CCValAssign::AExt: 1934 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { 1935 // Special case: passing MMX values in XMM registers. 1936 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); 1937 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 1938 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 1939 } else 1940 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 1941 break; 1942 case CCValAssign::BCvt: 1943 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg); 1944 break; 1945 case CCValAssign::Indirect: { 1946 // Store the argument. 1947 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 1948 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 1949 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 1950 MachinePointerInfo::getFixedStack(FI), 1951 false, false, 0); 1952 Arg = SpillSlot; 1953 break; 1954 } 1955 } 1956 1957 if (VA.isRegLoc()) { 1958 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1959 if (isVarArg && Subtarget->isTargetWin64()) { 1960 // Win64 ABI requires argument XMM reg to be copied to the corresponding 1961 // shadow reg if callee is a varargs function.
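        // For instance, a double passed in XMM1 to a varargs callee is also
        // copied into RDX, its shadow register, so that the callee's va_arg
        // sequence can find it in the integer register save area.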
1962 unsigned ShadowReg = 0; 1963 switch (VA.getLocReg()) { 1964 case X86::XMM0: ShadowReg = X86::RCX; break; 1965 case X86::XMM1: ShadowReg = X86::RDX; break; 1966 case X86::XMM2: ShadowReg = X86::R8; break; 1967 case X86::XMM3: ShadowReg = X86::R9; break; 1968 } 1969 if (ShadowReg) 1970 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 1971 } 1972 } else if (!IsSibcall && (!isTailCall || isByVal)) { 1973 assert(VA.isMemLoc()); 1974 if (StackPtr.getNode() == 0) 1975 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 1976 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1977 dl, DAG, VA, Flags)); 1978 } 1979 } 1980 1981 if (!MemOpChains.empty()) 1982 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1983 &MemOpChains[0], MemOpChains.size()); 1984 1985 // Build a sequence of copy-to-reg nodes chained together with token chain 1986 // and flag operands which copy the outgoing args into registers. 1987 SDValue InFlag; 1988 // Tail call byval lowering might overwrite argument registers so in case of 1989 // tail call optimization the copies to registers are lowered later. 1990 if (!isTailCall) 1991 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1992 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1993 RegsToPass[i].second, InFlag); 1994 InFlag = Chain.getValue(1); 1995 } 1996 1997 if (Subtarget->isPICStyleGOT()) { 1998 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1999 // GOT pointer. 2000 if (!isTailCall) { 2001 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, 2002 DAG.getNode(X86ISD::GlobalBaseReg, 2003 DebugLoc(), getPointerTy()), 2004 InFlag); 2005 InFlag = Chain.getValue(1); 2006 } else { 2007 // If we are tail calling and generating PIC/GOT style code load the 2008 // address of the callee into ECX. The value in ecx is used as target of 2009 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2010 // for tail calls on PIC/GOT architectures. Normally we would just put the 2011 // address of GOT into ebx and then call target@PLT. But for tail calls 2012 // ebx would be restored (since ebx is callee saved) before jumping to the 2013 // target@PLT. 2014 2015 // Note: The actual moving to ECX is done further down. 2016 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2017 if (G && !G->getGlobal()->hasHiddenVisibility() && 2018 !G->getGlobal()->hasProtectedVisibility()) 2019 Callee = LowerGlobalAddress(Callee, DAG); 2020 else if (isa<ExternalSymbolSDNode>(Callee)) 2021 Callee = LowerExternalSymbol(Callee, DAG); 2022 } 2023 } 2024 2025 if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) { 2026 // From AMD64 ABI document: 2027 // For calls that may call functions that use varargs or stdargs 2028 // (prototype-less calls or calls to functions containing ellipsis (...) in 2029 // the declaration) %al is used as hidden argument to specify the number 2030 // of SSE registers used. The contents of %al do not need to match exactly 2031 // the number of registers, but must be an upper bound on the number of SSE 2032 // registers used and is in the range 0 - 8 inclusive. 2033 2034 // Count the number of XMM registers allocated.
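    // Example: for a call such as printf("%f\n", x), which places one double
    // in XMM0, the copy below amounts to 'movb $1, %al' right before the
    // call; any upper bound up to 8 would be equally ABI-conformant.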
2035 static const unsigned XMMArgRegs[] = { 2036 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2037 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2038 }; 2039 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2040 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2041 && "SSE registers cannot be used when SSE is disabled"); 2042 2043 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, 2044 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 2045 InFlag = Chain.getValue(1); 2046 } 2047 2048 2049 // For tail calls lower the arguments to the 'real' stack slot. 2050 if (isTailCall) { 2051 // Force all the incoming stack arguments to be loaded from the stack 2052 // before any new outgoing arguments are stored to the stack, because the 2053 // outgoing stack slots may alias the incoming argument stack slots, and 2054 // the alias isn't otherwise explicit. This is slightly more conservative 2055 // than necessary, because it means that each store effectively depends 2056 // on every argument instead of just those arguments it would clobber. 2057 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2058 2059 SmallVector<SDValue, 8> MemOpChains2; 2060 SDValue FIN; 2061 int FI = 0; 2062 // Do not flag preceding copytoreg stuff together with the following stuff. 2063 InFlag = SDValue(); 2064 if (GuaranteedTailCallOpt) { 2065 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2066 CCValAssign &VA = ArgLocs[i]; 2067 if (VA.isRegLoc()) 2068 continue; 2069 assert(VA.isMemLoc()); 2070 SDValue Arg = OutVals[i]; 2071 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2072 // Create frame index. 2073 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2074 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2075 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2076 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2077 2078 if (Flags.isByVal()) { 2079 // Copy relative to framepointer. 2080 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2081 if (StackPtr.getNode() == 0) 2082 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 2083 getPointerTy()); 2084 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2085 2086 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2087 ArgChain, 2088 Flags, DAG, dl)); 2089 } else { 2090 // Store relative to framepointer. 2091 MemOpChains2.push_back( 2092 DAG.getStore(ArgChain, dl, Arg, FIN, 2093 MachinePointerInfo::getFixedStack(FI), 2094 false, false, 0)); 2095 } 2096 } 2097 } 2098 2099 if (!MemOpChains2.empty()) 2100 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2101 &MemOpChains2[0], MemOpChains2.size()); 2102 2103 // Copy arguments to their registers. 2104 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2105 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2106 RegsToPass[i].second, InFlag); 2107 InFlag = Chain.getValue(1); 2108 } 2109 InFlag = SDValue(); 2110 2111 // Store the return address to the appropriate stack slot. 2112 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, 2113 FPDiff, dl); 2114 } 2115 2116 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2117 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2118 // In the 64-bit large code model, we have to make all calls 2119 // through a register, since the call instruction's 32-bit 2120 // pc-relative offset may not be large enough to hold the whole 2121 // address.
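    // Sketch of the eventual code (register choice is up to the allocator):
    //   movabsq $callee, %rax
    //   callq   *%rax
    // rather than a direct 'callq callee' with a rel32 displacement.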
2122 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2123 // If the callee is a GlobalAddress node (quite common, every direct call 2124 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2125 // it. 2126 2127 // We should use extra load for direct calls to dllimported functions in 2128 // non-JIT mode. 2129 const GlobalValue *GV = G->getGlobal(); 2130 if (!GV->hasDLLImportLinkage()) { 2131 unsigned char OpFlags = 0; 2132 2133 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2134 // external symbols must go through the PLT in PIC mode. If the symbol 2135 // has hidden or protected visibility, or if it is static or local, then 2136 // we don't need to use the PLT - we can directly call it. 2137 if (Subtarget->isTargetELF() && 2138 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2139 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2140 OpFlags = X86II::MO_PLT; 2141 } else if (Subtarget->isPICStyleStubAny() && 2142 (GV->isDeclaration() || GV->isWeakForLinker()) && 2143 Subtarget->getDarwinVers() < 9) { 2144 // PC-relative references to external symbols should go through $stub, 2145 // unless we're building with the leopard linker or later, which 2146 // automatically synthesizes these stubs. 2147 OpFlags = X86II::MO_DARWIN_STUB; 2148 } 2149 2150 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2151 G->getOffset(), OpFlags); 2152 } 2153 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2154 unsigned char OpFlags = 0; 2155 2156 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to external 2157 // symbols should go through the PLT. 2158 if (Subtarget->isTargetELF() && 2159 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2160 OpFlags = X86II::MO_PLT; 2161 } else if (Subtarget->isPICStyleStubAny() && 2162 Subtarget->getDarwinVers() < 9) { 2163 // PC-relative references to external symbols should go through $stub, 2164 // unless we're building with the leopard linker or later, which 2165 // automatically synthesizes these stubs. 2166 OpFlags = X86II::MO_DARWIN_STUB; 2167 } 2168 2169 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2170 OpFlags); 2171 } 2172 2173 // Returns a chain & a flag for retval copy to use. 2174 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 2175 SmallVector<SDValue, 8> Ops; 2176 2177 if (!IsSibcall && isTailCall) { 2178 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2179 DAG.getIntPtrConstant(0, true), InFlag); 2180 InFlag = Chain.getValue(1); 2181 } 2182 2183 Ops.push_back(Chain); 2184 Ops.push_back(Callee); 2185 2186 if (isTailCall) 2187 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2188 2189 // Add argument registers to the end of the list so that they are known live 2190 // into the call. 2191 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2192 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2193 RegsToPass[i].second.getValueType())); 2194 2195 // Add an implicit use of the GOT pointer in EBX. 2196 if (!isTailCall && Subtarget->isPICStyleGOT()) 2197 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2198 2199 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions.
2200 if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) 2201 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); 2202 2203 if (InFlag.getNode()) 2204 Ops.push_back(InFlag); 2205 2206 if (isTailCall) { 2207 // We used to do: 2208 //// If this is the first return lowered for this function, add the regs 2209 //// to the liveout set for the function. 2210 // This isn't right, although it's probably harmless on x86; liveouts 2211 // should be computed from returns not tail calls. Consider a void 2212 // function making a tail call to a function returning int. 2213 return DAG.getNode(X86ISD::TC_RETURN, dl, 2214 NodeTys, &Ops[0], Ops.size()); 2215 } 2216 2217 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2218 InFlag = Chain.getValue(1); 2219 2220 // Create the CALLSEQ_END node. 2221 unsigned NumBytesForCalleeToPush; 2222 if (Subtarget->IsCalleePop(isVarArg, CallConv)) 2223 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2224 else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet) 2225 // If this is a call to a struct-return function, the callee 2226 // pops the hidden struct pointer, so we have to push it back. 2227 // This is common for Darwin/X86, Linux & Mingw32 targets. 2228 NumBytesForCalleeToPush = 4; 2229 else 2230 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2231 2232 // Returns a flag for retval copy to use. 2233 if (!IsSibcall) { 2234 Chain = DAG.getCALLSEQ_END(Chain, 2235 DAG.getIntPtrConstant(NumBytes, true), 2236 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2237 true), 2238 InFlag); 2239 InFlag = Chain.getValue(1); 2240 } 2241 2242 // Handle result values, copying them out of physregs into vregs that we 2243 // return. 2244 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2245 Ins, dl, DAG, InVals); 2246} 2247 2248 2249//===----------------------------------------------------------------------===// 2250// Fast Calling Convention (tail call) implementation 2251//===----------------------------------------------------------------------===// 2252 2253// Like StdCall, the callee cleans up the arguments, except that ECX is 2254// reserved for storing the address of the tail called function. Only 2 2255// registers are free for argument passing (inreg). Tail call optimization is 2256// performed provided: 2257// * tailcallopt is enabled 2258// * caller/callee are fastcc 2259// On X86_64 architecture with GOT-style position independent code only local 2260// (within module) calls are supported at the moment. 2261// To keep the stack aligned according to the platform ABI the function 2262// GetAlignedArgumentStackSize ensures that the argument delta is always a 2263// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example) 2264// If a tail called function (callee) has more arguments than the caller, the 2265// caller needs to make sure that there is room to move the RETADDR to. This is 2266// achieved by reserving an area the size of the argument delta right after the 2267// original RETADDR, but before the saved frame pointer or the spilled registers 2268// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4) 2269// stack layout: 2270// arg1 2271// arg2 2272// RETADDR 2273// [ new RETADDR 2274// move area ] 2275// (possible EBP) 2276// ESI 2277// EDI 2278// local1 .. 2279 2280/// GetAlignedArgumentStackSize - Round the stack argument size up so that the 2281/// stack stays aligned, e.g. to 16n + 12 for a 16 byte alignment requirement.
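/// For example, on a 32-bit target with 16-byte stack alignment and 4-byte
/// slots, a StackSize of 20 is rounded up to 28 (16*1 + 12), so that once
/// the 4-byte return address is pushed the stack is 16-byte aligned again.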
2282 unsigned 2283 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2284 SelectionDAG& DAG) const { 2285 MachineFunction &MF = DAG.getMachineFunction(); 2286 const TargetMachine &TM = MF.getTarget(); 2287 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 2288 unsigned StackAlignment = TFI.getStackAlignment(); 2289 uint64_t AlignMask = StackAlignment - 1; 2290 int64_t Offset = StackSize; 2291 uint64_t SlotSize = TD->getPointerSize(); 2292 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2293 // The misalignment already fits below (StackAlignment - SlotSize), so just 2294 // add the difference. 2294 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2295 } else { 2296 // Mask out the lower bits, then add one full StackAlignment plus 2297 // (StackAlignment - SlotSize). 2297 Offset = ((~AlignMask) & Offset) + StackAlignment + 2298 (StackAlignment-SlotSize); 2299 } 2300 return Offset; 2301} 2302 2303/// MatchingStackOffset - Return true if the given stack call argument is 2304/// already available in the same position (relatively) of the caller's 2305/// incoming argument stack. 2306static 2307bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2308 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2309 const X86InstrInfo *TII) { 2310 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2311 int FI = INT_MAX; 2312 if (Arg.getOpcode() == ISD::CopyFromReg) { 2313 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2314 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 2315 return false; 2316 MachineInstr *Def = MRI->getVRegDef(VR); 2317 if (!Def) 2318 return false; 2319 if (!Flags.isByVal()) { 2320 if (!TII->isLoadFromStackSlot(Def, FI)) 2321 return false; 2322 } else { 2323 unsigned Opcode = Def->getOpcode(); 2324 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2325 Def->getOperand(1).isFI()) { 2326 FI = Def->getOperand(1).getIndex(); 2327 Bytes = Flags.getByValSize(); 2328 } else 2329 return false; 2330 } 2331 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2332 if (Flags.isByVal()) 2333 // ByVal argument is passed in as a pointer but it's now being 2334 // dereferenced. e.g. 2335 // define @foo(%struct.X* %A) { 2336 // tail call @bar(%struct.X* byval %A) 2337 // } 2338 return false; 2339 SDValue Ptr = Ld->getBasePtr(); 2340 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2341 if (!FINode) 2342 return false; 2343 FI = FINode->getIndex(); 2344 } else 2345 return false; 2346 2347 assert(FI != INT_MAX); 2348 if (!MFI->isFixedObjectIndex(FI)) 2349 return false; 2350 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2351} 2352 2353/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2354/// for tail call optimization. Targets which want to do tail call 2355/// optimization should implement this function. 2356bool 2357X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2358 CallingConv::ID CalleeCC, 2359 bool isVarArg, 2360 bool isCalleeStructRet, 2361 bool isCallerStructRet, 2362 const SmallVectorImpl<ISD::OutputArg> &Outs, 2363 const SmallVectorImpl<SDValue> &OutVals, 2364 const SmallVectorImpl<ISD::InputArg> &Ins, 2365 SelectionDAG& DAG) const { 2366 if (!IsTailCallConvention(CalleeCC) && 2367 CalleeCC != CallingConv::C) 2368 return false; 2369 2370 // If -tailcallopt is specified, make fastcc functions tail-callable.
2371 const MachineFunction &MF = DAG.getMachineFunction(); 2372 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2373 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2374 bool CCMatch = CallerCC == CalleeCC; 2375 2376 if (GuaranteedTailCallOpt) { 2377 if (IsTailCallConvention(CalleeCC) && CCMatch) 2378 return true; 2379 return false; 2380 } 2381 2382 // Look for obvious safe cases to perform tail call optimization that do not 2383 // require ABI changes. This is what gcc calls sibcall. 2384 2385 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2386 // emit a special epilogue. 2387 if (RegInfo->needsStackRealignment(MF)) 2388 return false; 2389 2390 // Do not sibcall optimize vararg calls unless the call site passes no 2391 // arguments. 2392 if (isVarArg && !Outs.empty()) 2393 return false; 2394 2395 // Also avoid sibcall optimization if either caller or callee uses struct 2396 // return semantics. 2397 if (isCalleeStructRet || isCallerStructRet) 2398 return false; 2399 2400 // If the call result is in ST0 / ST1, it needs to be popped off the x87 stack. 2401 // Therefore if it is not used by the caller, it is not safe to optimize this 2402 // into a sibcall. 2403 bool Unused = false; 2404 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2405 if (!Ins[i].Used) { 2406 Unused = true; 2407 break; 2408 } 2409 } 2410 if (Unused) { 2411 SmallVector<CCValAssign, 16> RVLocs; 2412 CCState CCInfo(CalleeCC, false, getTargetMachine(), 2413 RVLocs, *DAG.getContext()); 2414 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2415 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2416 CCValAssign &VA = RVLocs[i]; 2417 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2418 return false; 2419 } 2420 } 2421 2422 // If the calling conventions do not match, then we'd better make sure the 2423 // results are returned in the same way as the caller expects. 2424 if (!CCMatch) { 2425 SmallVector<CCValAssign, 16> RVLocs1; 2426 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 2427 RVLocs1, *DAG.getContext()); 2428 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2429 2430 SmallVector<CCValAssign, 16> RVLocs2; 2431 CCState CCInfo2(CallerCC, false, getTargetMachine(), 2432 RVLocs2, *DAG.getContext()); 2433 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2434 2435 if (RVLocs1.size() != RVLocs2.size()) 2436 return false; 2437 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2438 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2439 return false; 2440 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2441 return false; 2442 if (RVLocs1[i].isRegLoc()) { 2443 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2444 return false; 2445 } else { 2446 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2447 return false; 2448 } 2449 } 2450 } 2451 2452 // If the callee takes no arguments then go on to check the results of the 2453 // call. 2454 if (!Outs.empty()) { 2455 // Check if stack adjustment is needed. For now, do not do this if any 2456 // argument is passed on the stack.
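    // Illustrative scenario: a caller f(i32 %x) tail-calling g(i32 %x) may
    // reuse the caller's slot for %x only if %x still sits at the same fixed
    // stack offset; that is the property MatchingStackOffset verifies below.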
2457 SmallVector<CCValAssign, 16> ArgLocs; 2458 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 2459 ArgLocs, *DAG.getContext()); 2460 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 2461 if (CCInfo.getNextStackOffset()) { 2462 MachineFunction &MF = DAG.getMachineFunction(); 2463 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2464 return false; 2465 if (Subtarget->isTargetWin64()) 2466 // Win64 ABI has additional complications. 2467 return false; 2468 2469 // Check if the arguments are already laid out in the right way as 2470 // the caller's fixed stack objects. 2471 MachineFrameInfo *MFI = MF.getFrameInfo(); 2472 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2473 const X86InstrInfo *TII = 2474 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2475 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2476 CCValAssign &VA = ArgLocs[i]; 2477 SDValue Arg = OutVals[i]; 2478 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2479 if (VA.getLocInfo() == CCValAssign::Indirect) 2480 return false; 2481 if (!VA.isRegLoc()) { 2482 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2483 MFI, MRI, TII)) 2484 return false; 2485 } 2486 } 2487 } 2488 2489 // If the tailcall address may be in a register, then make sure it's 2490 // possible to register allocate for it. In 32-bit, the call address can 2491 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2492 // callee-saved registers are restored. These happen to be the same 2493 // registers used to pass 'inreg' arguments so watch out for those. 2494 if (!Subtarget->is64Bit() && 2495 !isa<GlobalAddressSDNode>(Callee) && 2496 !isa<ExternalSymbolSDNode>(Callee)) { 2497 unsigned NumInRegs = 0; 2498 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2499 CCValAssign &VA = ArgLocs[i]; 2500 if (!VA.isRegLoc()) 2501 continue; 2502 unsigned Reg = VA.getLocReg(); 2503 switch (Reg) { 2504 default: break; 2505 case X86::EAX: case X86::EDX: case X86::ECX: 2506 if (++NumInRegs == 3) 2507 return false; 2508 break; 2509 } 2510 } 2511 } 2512 } 2513 2514 return true; 2515} 2516 2517FastISel * 2518X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 2519 return X86::createFastISel(funcInfo); 2520} 2521 2522 2523//===----------------------------------------------------------------------===// 2524// Other Lowering Hooks 2525//===----------------------------------------------------------------------===// 2526 2527static bool MayFoldLoad(SDValue Op) { 2528 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2529} 2530 2531static bool MayFoldIntoStore(SDValue Op) { 2532 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2533} 2534 2535static bool isTargetShuffle(unsigned Opcode) { 2536 switch(Opcode) { 2537 default: return false; 2538 case X86ISD::PSHUFD: 2539 case X86ISD::PSHUFHW: 2540 case X86ISD::PSHUFLW: 2541 case X86ISD::SHUFPD: 2542 case X86ISD::PALIGN: 2543 case X86ISD::SHUFPS: 2544 case X86ISD::MOVLHPS: 2545 case X86ISD::MOVLHPD: 2546 case X86ISD::MOVHLPS: 2547 case X86ISD::MOVLPS: 2548 case X86ISD::MOVLPD: 2549 case X86ISD::MOVSHDUP: 2550 case X86ISD::MOVSLDUP: 2551 case X86ISD::MOVDDUP: 2552 case X86ISD::MOVSS: 2553 case X86ISD::MOVSD: 2554 case X86ISD::UNPCKLPS: 2555 case X86ISD::UNPCKLPD: 2556 case X86ISD::PUNPCKLWD: 2557 case X86ISD::PUNPCKLBW: 2558 case X86ISD::PUNPCKLDQ: 2559 case X86ISD::PUNPCKLQDQ: 2560 case X86ISD::UNPCKHPS: 2561 case X86ISD::UNPCKHPD: 2562 case X86ISD::PUNPCKHWD: 2563 case X86ISD::PUNPCKHBW: 2564 case 
X86ISD::PUNPCKHDQ: 2565 case X86ISD::PUNPCKHQDQ: 2566 return true; 2567 } 2568 return false; 2569} 2570 2571static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2572 SDValue V1, SelectionDAG &DAG) { 2573 switch(Opc) { 2574 default: llvm_unreachable("Unknown x86 shuffle node"); 2575 case X86ISD::MOVSHDUP: 2576 case X86ISD::MOVSLDUP: 2577 case X86ISD::MOVDDUP: 2578 return DAG.getNode(Opc, dl, VT, V1); 2579 } 2580 2581 return SDValue(); 2582} 2583 2584static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2585 SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { 2586 switch(Opc) { 2587 default: llvm_unreachable("Unknown x86 shuffle node"); 2588 case X86ISD::PSHUFD: 2589 case X86ISD::PSHUFHW: 2590 case X86ISD::PSHUFLW: 2591 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 2592 } 2593 2594 return SDValue(); 2595} 2596 2597static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2598 SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { 2599 switch(Opc) { 2600 default: llvm_unreachable("Unknown x86 shuffle node"); 2601 case X86ISD::PALIGN: 2602 case X86ISD::SHUFPD: 2603 case X86ISD::SHUFPS: 2604 return DAG.getNode(Opc, dl, VT, V1, V2, 2605 DAG.getConstant(TargetMask, MVT::i8)); 2606 } 2607 return SDValue(); 2608} 2609 2610static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2611 SDValue V1, SDValue V2, SelectionDAG &DAG) { 2612 switch(Opc) { 2613 default: llvm_unreachable("Unknown x86 shuffle node"); 2614 case X86ISD::MOVLHPS: 2615 case X86ISD::MOVLHPD: 2616 case X86ISD::MOVHLPS: 2617 case X86ISD::MOVLPS: 2618 case X86ISD::MOVLPD: 2619 case X86ISD::MOVSS: 2620 case X86ISD::MOVSD: 2621 case X86ISD::UNPCKLPS: 2622 case X86ISD::UNPCKLPD: 2623 case X86ISD::PUNPCKLWD: 2624 case X86ISD::PUNPCKLBW: 2625 case X86ISD::PUNPCKLDQ: 2626 case X86ISD::PUNPCKLQDQ: 2627 case X86ISD::UNPCKHPS: 2628 case X86ISD::UNPCKHPD: 2629 case X86ISD::PUNPCKHWD: 2630 case X86ISD::PUNPCKHBW: 2631 case X86ISD::PUNPCKHDQ: 2632 case X86ISD::PUNPCKHQDQ: 2633 return DAG.getNode(Opc, dl, VT, V1, V2); 2634 } 2635 return SDValue(); 2636} 2637 2638SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 2639 MachineFunction &MF = DAG.getMachineFunction(); 2640 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2641 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2642 2643 if (ReturnAddrIndex == 0) { 2644 // Set up a frame object for the return address. 2645 uint64_t SlotSize = TD->getPointerSize(); 2646 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 2647 false); 2648 FuncInfo->setRAIndex(ReturnAddrIndex); 2649 } 2650 2651 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 2652} 2653 2654 2655bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 2656 bool hasSymbolicDisplacement) { 2657 // Offset should fit into 32 bit immediate field. 2658 if (!isInt<32>(Offset)) 2659 return false; 2660 2661 // If we don't have a symbolic displacement, we don't have any extra 2662 // restrictions. 2663 if (!hasSymbolicDisplacement) 2664 return true; 2665 2666 // FIXME: Some tweaks might be needed for medium code model. 2667 if (M != CodeModel::Small && M != CodeModel::Kernel) 2668 return false; 2669 2670 // For the small code model, we assume that the last object is 16MB below the 2671 // 2^31 boundary. We may also accept pretty large negative constants, knowing 2672 // that all objects are in the positive half of the address space.
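// For instance (made-up values): with a symbolic displacement, an offset of
// 4*1024*1024 is accepted under the small code model, while 32*1024*1024 is
// rejected because the object could then cross the 2^31 boundary.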
2673 if (M == CodeModel::Small && Offset < 16*1024*1024) 2674 return true; 2675 2676 // For the kernel code model we know that all objects reside in the negative 2677 // half of the 32-bit address space. We do not accept negative offsets, since 2678 // they may fall just outside an object, but we may accept pretty large positive ones. 2679 if (M == CodeModel::Kernel && Offset > 0) 2680 return true; 2681 2682 return false; 2683} 2684 2685/// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the 2686/// X86-specific condition code, returning the condition code and the LHS/RHS 2687/// of the comparison to make. 2688static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 2689 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 2690 if (!isFP) { 2691 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 2692 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 2693 // X > -1 -> X == 0, jump !sign. 2694 RHS = DAG.getConstant(0, RHS.getValueType()); 2695 return X86::COND_NS; 2696 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 2697 // X < 0 -> X == 0, jump on sign. 2698 return X86::COND_S; 2699 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 2700 // X < 1 -> X <= 0 2701 RHS = DAG.getConstant(0, RHS.getValueType()); 2702 return X86::COND_LE; 2703 } 2704 } 2705 2706 switch (SetCCOpcode) { 2707 default: llvm_unreachable("Invalid integer condition!"); 2708 case ISD::SETEQ: return X86::COND_E; 2709 case ISD::SETGT: return X86::COND_G; 2710 case ISD::SETGE: return X86::COND_GE; 2711 case ISD::SETLT: return X86::COND_L; 2712 case ISD::SETLE: return X86::COND_LE; 2713 case ISD::SETNE: return X86::COND_NE; 2714 case ISD::SETULT: return X86::COND_B; 2715 case ISD::SETUGT: return X86::COND_A; 2716 case ISD::SETULE: return X86::COND_BE; 2717 case ISD::SETUGE: return X86::COND_AE; 2718 } 2719 } 2720 2721 // First determine if it is required or is profitable to flip the operands. 2722 2723 // If LHS is a foldable load, but RHS is not, flip the condition. 2724 if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) && 2725 !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) { 2726 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 2727 std::swap(LHS, RHS); 2728 } 2729 2730 switch (SetCCOpcode) { 2731 default: break; 2732 case ISD::SETOLT: 2733 case ISD::SETOLE: 2734 case ISD::SETUGT: 2735 case ISD::SETUGE: 2736 std::swap(LHS, RHS); 2737 break; 2738 } 2739 2740 // On a floating point condition, the flags are set as follows: 2741 // ZF PF CF op 2742 // 0 | 0 | 0 | X > Y 2743 // 0 | 0 | 1 | X < Y 2744 // 1 | 0 | 0 | X == Y 2745 // 1 | 1 | 1 | unordered 2746 switch (SetCCOpcode) { 2747 default: llvm_unreachable("Condcode should be pre-legalized away"); 2748 case ISD::SETUEQ: 2749 case ISD::SETEQ: return X86::COND_E; 2750 case ISD::SETOLT: // flipped 2751 case ISD::SETOGT: 2752 case ISD::SETGT: return X86::COND_A; 2753 case ISD::SETOLE: // flipped 2754 case ISD::SETOGE: 2755 case ISD::SETGE: return X86::COND_AE; 2756 case ISD::SETUGT: // flipped 2757 case ISD::SETULT: 2758 case ISD::SETLT: return X86::COND_B; 2759 case ISD::SETUGE: // flipped 2760 case ISD::SETULE: 2761 case ISD::SETLE: return X86::COND_BE; 2762 case ISD::SETONE: 2763 case ISD::SETNE: return X86::COND_NE; 2764 case ISD::SETUO: return X86::COND_P; 2765 case ISD::SETO: return X86::COND_NP; 2766 case ISD::SETOEQ: 2767 case ISD::SETUNE: return X86::COND_INVALID; 2768 } 2769} 2770 2771/// hasFPCMov - is there a floating point cmov for the specific X86 condition 2772/// code.
The current x86 ISA includes the following FP cmov instructions: 2773 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 2774 static bool hasFPCMov(unsigned X86CC) { 2775 switch (X86CC) { 2776 default: 2777 return false; 2778 case X86::COND_B: 2779 case X86::COND_BE: 2780 case X86::COND_E: 2781 case X86::COND_P: 2782 case X86::COND_A: 2783 case X86::COND_AE: 2784 case X86::COND_NE: 2785 case X86::COND_NP: 2786 return true; 2787 } 2788} 2789 2790/// isFPImmLegal - Returns true if the target can instruction select the 2791/// specified FP immediate natively. If false, the legalizer will 2792/// materialize the FP immediate as a load from a constant pool. 2793bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 2794 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 2795 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 2796 return true; 2797 } 2798 return false; 2799} 2800 2801/// isUndefOrInRange - Return true if Val is undef or if its value falls within 2802/// the specified range [Low, Hi). 2803static bool isUndefOrInRange(int Val, int Low, int Hi) { 2804 return (Val < 0) || (Val >= Low && Val < Hi); 2805} 2806 2807/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 2808/// specified value. 2809static bool isUndefOrEqual(int Val, int CmpVal) { 2810 if (Val < 0 || Val == CmpVal) 2811 return true; 2812 return false; 2813} 2814 2815/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 2816/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 2817/// the second operand. 2818static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2819 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 2820 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 2821 if (VT == MVT::v2f64 || VT == MVT::v2i64) 2822 return (Mask[0] < 2 && Mask[1] < 2); 2823 return false; 2824} 2825 2826bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) { 2827 SmallVector<int, 8> M; 2828 N->getMask(M); 2829 return ::isPSHUFDMask(M, N->getValueType(0)); 2830} 2831 2832/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 2833/// is suitable for input to PSHUFHW. 2834static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2835 if (VT != MVT::v8i16) 2836 return false; 2837 2838 // Lower quadword copied in order or undef. 2839 for (int i = 0; i != 4; ++i) 2840 if (Mask[i] >= 0 && Mask[i] != i) 2841 return false; 2842 2843 // Upper quadword shuffled. 2844 for (int i = 4; i != 8; ++i) 2845 if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7)) 2846 return false; 2847 2848 return true; 2849} 2850 2851bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) { 2852 SmallVector<int, 8> M; 2853 N->getMask(M); 2854 return ::isPSHUFHWMask(M, N->getValueType(0)); 2855} 2856 2857/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 2858/// is suitable for input to PSHUFLW. 2859static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2860 if (VT != MVT::v8i16) 2861 return false; 2862 2863 // Upper quadword copied in order. 2864 for (int i = 4; i != 8; ++i) 2865 if (Mask[i] >= 0 && Mask[i] != i) 2866 return false; 2867 2868 // Lower quadword shuffled.
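  // e.g. <2,1,3,0,4,5,6,7> is a valid PSHUFLW mask, while a mask whose
  // element 2 is 5 is not, since it would read from the upper quadword.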
2869 for (int i = 0; i != 4; ++i) 2870 if (Mask[i] >= 4) 2871 return false; 2872 2873 return true; 2874} 2875 2876bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) { 2877 SmallVector<int, 8> M; 2878 N->getMask(M); 2879 return ::isPSHUFLWMask(M, N->getValueType(0)); 2880} 2881 2882/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 2883/// is suitable for input to PALIGNR. 2884static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT, 2885 bool hasSSSE3) { 2886 int i, e = VT.getVectorNumElements(); 2887 2888 // Do not handle v2i64 / v2f64 shuffles with palignr. 2889 if (e < 4 || !hasSSSE3) 2890 return false; 2891 2892 for (i = 0; i != e; ++i) 2893 if (Mask[i] >= 0) 2894 break; 2895 2896 // All undef, not a palignr. 2897 if (i == e) 2898 return false; 2899 2900 // Determine if it's ok to perform a palignr with only the LHS, since we 2901 // don't have access to the actual shuffle elements to see if RHS is undef. 2902 bool Unary = Mask[i] < (int)e; 2903 bool NeedsUnary = false; 2904 2905 int s = Mask[i] - i; 2906 2907 // Check the rest of the elements to see if they are consecutive. 2908 for (++i; i != e; ++i) { 2909 int m = Mask[i]; 2910 if (m < 0) 2911 continue; 2912 2913 Unary = Unary && (m < (int)e); 2914 NeedsUnary = NeedsUnary || (m < s); 2915 2916 if (NeedsUnary && !Unary) 2917 return false; 2918 if (Unary && m != ((s+i) & (e-1))) 2919 return false; 2920 if (!Unary && m != (s+i)) 2921 return false; 2922 } 2923 return true; 2924} 2925 2926bool X86::isPALIGNRMask(ShuffleVectorSDNode *N) { 2927 SmallVector<int, 8> M; 2928 N->getMask(M); 2929 return ::isPALIGNRMask(M, N->getValueType(0), true); 2930} 2931 2932/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2933/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2934static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2935 int NumElems = VT.getVectorNumElements(); 2936 if (NumElems != 2 && NumElems != 4) 2937 return false; 2938 2939 int Half = NumElems / 2; 2940 for (int i = 0; i < Half; ++i) 2941 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 2942 return false; 2943 for (int i = Half; i < NumElems; ++i) 2944 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 2945 return false; 2946 2947 return true; 2948} 2949 2950bool X86::isSHUFPMask(ShuffleVectorSDNode *N) { 2951 SmallVector<int, 8> M; 2952 N->getMask(M); 2953 return ::isSHUFPMask(M, N->getValueType(0)); 2954} 2955 2956/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2957/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2958/// half elements to come from vector 1 (which would equal the dest.) and 2959/// the upper half to come from vector 2. 2960static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 2961 int NumElems = VT.getVectorNumElements(); 2962 2963 if (NumElems != 2 && NumElems != 4) 2964 return false; 2965 2966 int Half = NumElems / 2; 2967 for (int i = 0; i < Half; ++i) 2968 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 2969 return false; 2970 for (int i = Half; i < NumElems; ++i) 2971 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 2972 return false; 2973 return true; 2974} 2975 2976static bool isCommutedSHUFP(ShuffleVectorSDNode *N) { 2977 SmallVector<int, 8> M; 2978 N->getMask(M); 2979 return isCommutedSHUFPMask(M, N->getValueType(0)); 2980} 2981 2982/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2983/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 
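/// For example, for v4f32 the mask <6, 7, 2, 3> matches MOVHLPS: the low half
/// of the result takes the high half of V2 (elements 6 and 7) while the high
/// half keeps the high half of V1 (elements 2 and 3).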
2984bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) { 2985 if (N->getValueType(0).getVectorNumElements() != 4) 2986 return false; 2987 2988 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2989 return isUndefOrEqual(N->getMaskElt(0), 6) && 2990 isUndefOrEqual(N->getMaskElt(1), 7) && 2991 isUndefOrEqual(N->getMaskElt(2), 2) && 2992 isUndefOrEqual(N->getMaskElt(3), 3); 2993} 2994 2995/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2996/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2997/// <2, 3, 2, 3> 2998bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) { 2999 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3000 3001 if (NumElems != 4) 3002 return false; 3003 3004 return isUndefOrEqual(N->getMaskElt(0), 2) && 3005 isUndefOrEqual(N->getMaskElt(1), 3) && 3006 isUndefOrEqual(N->getMaskElt(2), 2) && 3007 isUndefOrEqual(N->getMaskElt(3), 3); 3008} 3009 3010/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3011/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3012bool X86::isMOVLPMask(ShuffleVectorSDNode *N) { 3013 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3014 3015 if (NumElems != 2 && NumElems != 4) 3016 return false; 3017 3018 for (unsigned i = 0; i < NumElems/2; ++i) 3019 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems)) 3020 return false; 3021 3022 for (unsigned i = NumElems/2; i < NumElems; ++i) 3023 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3024 return false; 3025 3026 return true; 3027} 3028 3029/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3030/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3031bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) { 3032 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3033 3034 if (NumElems != 2 && NumElems != 4) 3035 return false; 3036 3037 for (unsigned i = 0; i < NumElems/2; ++i) 3038 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3039 return false; 3040 3041 for (unsigned i = 0; i < NumElems/2; ++i) 3042 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems)) 3043 return false; 3044 3045 return true; 3046} 3047 3048/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3049/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3050static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3051 bool V2IsSplat = false) { 3052 int NumElts = VT.getVectorNumElements(); 3053 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 3054 return false; 3055 3056 for (int i = 0, j = 0; i != NumElts; i += 2, ++j) { 3057 int BitI = Mask[i]; 3058 int BitI1 = Mask[i+1]; 3059 if (!isUndefOrEqual(BitI, j)) 3060 return false; 3061 if (V2IsSplat) { 3062 if (!isUndefOrEqual(BitI1, NumElts)) 3063 return false; 3064 } else { 3065 if (!isUndefOrEqual(BitI1, j + NumElts)) 3066 return false; 3067 } 3068 } 3069 return true; 3070} 3071 3072bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) { 3073 SmallVector<int, 8> M; 3074 N->getMask(M); 3075 return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat); 3076} 3077 3078/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3079/// specifies a shuffle of elements that is suitable for input to UNPCKH. 
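/// For example, for v4i32 the canonical UNPCKH mask is <2, 6, 3, 7>: even
/// positions take the high half of V1 (j + NumElts/2) and odd positions take
/// the matching high-half element of V2 (j + NumElts/2 + NumElts).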
3080 static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
3081                          bool V2IsSplat = false) {
3082   int NumElts = VT.getVectorNumElements();
3083   if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3084     return false;
3085 
3086   for (int i = 0, j = 0; i != NumElts; i += 2, ++j) {
3087     int BitI  = Mask[i];
3088     int BitI1 = Mask[i+1];
3089     if (!isUndefOrEqual(BitI, j + NumElts/2))
3090       return false;
3091     if (V2IsSplat) {
3092       if (isUndefOrEqual(BitI1, NumElts))
3093         return false;
3094     } else {
3095       if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
3096         return false;
3097     }
3098   }
3099   return true;
3100 }
3101 
3102 bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
3103   SmallVector<int, 8> M;
3104   N->getMask(M);
3105   return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat);
3106 }
3107 
3108 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
3109 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
3110 /// <0, 0, 1, 1>
3111 static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
3112   int NumElems = VT.getVectorNumElements();
3113   if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
3114     return false;
3115 
3116   for (int i = 0, j = 0; i != NumElems; i += 2, ++j) {
3117     int BitI  = Mask[i];
3118     int BitI1 = Mask[i+1];
3119     if (!isUndefOrEqual(BitI, j))
3120       return false;
3121     if (!isUndefOrEqual(BitI1, j))
3122       return false;
3123   }
3124   return true;
3125 }
3126 
3127 bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) {
3128   SmallVector<int, 8> M;
3129   N->getMask(M);
3130   return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0));
3131 }
3132 
3133 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
3134 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
3135 /// <2, 2, 3, 3>
3136 static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
3137   int NumElems = VT.getVectorNumElements();
3138   if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
3139     return false;
3140 
3141   for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
3142     int BitI  = Mask[i];
3143     int BitI1 = Mask[i+1];
3144     if (!isUndefOrEqual(BitI, j))
3145       return false;
3146     if (!isUndefOrEqual(BitI1, j))
3147       return false;
3148   }
3149   return true;
3150 }
3151 
3152 bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) {
3153   SmallVector<int, 8> M;
3154   N->getMask(M);
3155   return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0));
3156 }
3157 
3158 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
3159 /// specifies a shuffle of elements that is suitable for input to MOVSS,
3160 /// MOVSD, and MOVD, i.e. setting the lowest element.
3161 static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3162   if (VT.getVectorElementType().getSizeInBits() < 32)
3163     return false;
3164 
3165   int NumElts = VT.getVectorNumElements();
3166 
3167   if (!isUndefOrEqual(Mask[0], NumElts))
3168     return false;
3169 
3170   for (int i = 1; i < NumElts; ++i)
3171     if (!isUndefOrEqual(Mask[i], i))
3172       return false;
3173 
3174   return true;
3175 }
3176 
3177 bool X86::isMOVLMask(ShuffleVectorSDNode *N) {
3178   SmallVector<int, 8> M;
3179   N->getMask(M);
3180   return ::isMOVLMask(M, N->getValueType(0));
3181 }
3182 
3183 /// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
3184 /// of what x86 movss wants: the lowest element must be the lowest element
3185 /// of vector 2, and the other elements must come from vector 1 in order.
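/// For example, for v4i32 the mask <0, 5, 6, 7> is a commuted MOVL mask:
/// element 0 comes from V1 and the remaining elements continue through V2 in
/// order; commuting the operands turns it into the MOVL mask <4, 1, 2, 3>.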
3186static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3187 bool V2IsSplat = false, bool V2IsUndef = false) { 3188 int NumOps = VT.getVectorNumElements(); 3189 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3190 return false; 3191 3192 if (!isUndefOrEqual(Mask[0], 0)) 3193 return false; 3194 3195 for (int i = 1; i < NumOps; ++i) 3196 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3197 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3198 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3199 return false; 3200 3201 return true; 3202} 3203 3204static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false, 3205 bool V2IsUndef = false) { 3206 SmallVector<int, 8> M; 3207 N->getMask(M); 3208 return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef); 3209} 3210 3211/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3212/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3213bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N) { 3214 if (N->getValueType(0).getVectorNumElements() != 4) 3215 return false; 3216 3217 // Expect 1, 1, 3, 3 3218 for (unsigned i = 0; i < 2; ++i) { 3219 int Elt = N->getMaskElt(i); 3220 if (Elt >= 0 && Elt != 1) 3221 return false; 3222 } 3223 3224 bool HasHi = false; 3225 for (unsigned i = 2; i < 4; ++i) { 3226 int Elt = N->getMaskElt(i); 3227 if (Elt >= 0 && Elt != 3) 3228 return false; 3229 if (Elt == 3) 3230 HasHi = true; 3231 } 3232 // Don't use movshdup if it can be done with a shufps. 3233 // FIXME: verify that matching u, u, 3, 3 is what we want. 3234 return HasHi; 3235} 3236 3237/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3238/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3239bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N) { 3240 if (N->getValueType(0).getVectorNumElements() != 4) 3241 return false; 3242 3243 // Expect 0, 0, 2, 2 3244 for (unsigned i = 0; i < 2; ++i) 3245 if (N->getMaskElt(i) > 0) 3246 return false; 3247 3248 bool HasHi = false; 3249 for (unsigned i = 2; i < 4; ++i) { 3250 int Elt = N->getMaskElt(i); 3251 if (Elt >= 0 && Elt != 2) 3252 return false; 3253 if (Elt == 2) 3254 HasHi = true; 3255 } 3256 // Don't use movsldup if it can be done with a shufps. 3257 return HasHi; 3258} 3259 3260/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3261/// specifies a shuffle of elements that is suitable for input to MOVDDUP. 3262bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) { 3263 int e = N->getValueType(0).getVectorNumElements() / 2; 3264 3265 for (int i = 0; i < e; ++i) 3266 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3267 return false; 3268 for (int i = 0; i < e; ++i) 3269 if (!isUndefOrEqual(N->getMaskElt(e+i), i)) 3270 return false; 3271 return true; 3272} 3273 3274/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3275/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3276unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 3277 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3278 int NumOperands = SVOp->getValueType(0).getVectorNumElements(); 3279 3280 unsigned Shift = (NumOperands == 4) ? 
2 : 1; 3281 unsigned Mask = 0; 3282 for (int i = 0; i < NumOperands; ++i) { 3283 int Val = SVOp->getMaskElt(NumOperands-i-1); 3284 if (Val < 0) Val = 0; 3285 if (Val >= NumOperands) Val -= NumOperands; 3286 Mask |= Val; 3287 if (i != NumOperands - 1) 3288 Mask <<= Shift; 3289 } 3290 return Mask; 3291} 3292 3293/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3294/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 3295unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 3296 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3297 unsigned Mask = 0; 3298 // 8 nodes, but we only care about the last 4. 3299 for (unsigned i = 7; i >= 4; --i) { 3300 int Val = SVOp->getMaskElt(i); 3301 if (Val >= 0) 3302 Mask |= (Val - 4); 3303 if (i != 4) 3304 Mask <<= 2; 3305 } 3306 return Mask; 3307} 3308 3309/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 3310/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 3311unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 3312 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3313 unsigned Mask = 0; 3314 // 8 nodes, but we only care about the first 4. 3315 for (int i = 3; i >= 0; --i) { 3316 int Val = SVOp->getMaskElt(i); 3317 if (Val >= 0) 3318 Mask |= Val; 3319 if (i != 0) 3320 Mask <<= 2; 3321 } 3322 return Mask; 3323} 3324 3325/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 3326/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 3327unsigned X86::getShufflePALIGNRImmediate(SDNode *N) { 3328 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3329 EVT VVT = N->getValueType(0); 3330 unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3; 3331 int Val = 0; 3332 3333 unsigned i, e; 3334 for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) { 3335 Val = SVOp->getMaskElt(i); 3336 if (Val >= 0) 3337 break; 3338 } 3339 return (Val - i) * EltSize; 3340} 3341 3342/// isZeroNode - Returns true if Elt is a constant zero or a floating point 3343/// constant +0.0. 3344bool X86::isZeroNode(SDValue Elt) { 3345 return ((isa<ConstantSDNode>(Elt) && 3346 cast<ConstantSDNode>(Elt)->isNullValue()) || 3347 (isa<ConstantFPSDNode>(Elt) && 3348 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 3349} 3350 3351/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 3352/// their permute mask. 3353static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 3354 SelectionDAG &DAG) { 3355 EVT VT = SVOp->getValueType(0); 3356 unsigned NumElems = VT.getVectorNumElements(); 3357 SmallVector<int, 8> MaskVec; 3358 3359 for (unsigned i = 0; i != NumElems; ++i) { 3360 int idx = SVOp->getMaskElt(i); 3361 if (idx < 0) 3362 MaskVec.push_back(idx); 3363 else if (idx < (int)NumElems) 3364 MaskVec.push_back(idx + NumElems); 3365 else 3366 MaskVec.push_back(idx - NumElems); 3367 } 3368 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 3369 SVOp->getOperand(0), &MaskVec[0]); 3370} 3371 3372/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3373/// the two vector operands have swapped position. 
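/// For example, with VT = v4i32 the mask <0, 1, 4, 5> becomes <4, 5, 0, 1>:
/// indices below NumElems are moved up by NumElems and vice versa, while
/// undef (negative) entries are left untouched.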
3374 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
3375   unsigned NumElems = VT.getVectorNumElements();
3376   for (unsigned i = 0; i != NumElems; ++i) {
3377     int idx = Mask[i];
3378     if (idx < 0)
3379       continue;
3380     else if (idx < (int)NumElems)
3381       Mask[i] = idx + NumElems;
3382     else
3383       Mask[i] = idx - NumElems;
3384   }
3385 }
3386 
3387 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
3388 /// match movhlps. The lower half elements should come from upper half of
3389 /// V1 (and in order), and the upper half elements should come from the upper
3390 /// half of V2 (and in order).
3391 static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) {
3392   if (Op->getValueType(0).getVectorNumElements() != 4)
3393     return false;
3394   for (unsigned i = 0, e = 2; i != e; ++i)
3395     if (!isUndefOrEqual(Op->getMaskElt(i), i+2))
3396       return false;
3397   for (unsigned i = 2; i != 4; ++i)
3398     if (!isUndefOrEqual(Op->getMaskElt(i), i+4))
3399       return false;
3400   return true;
3401 }
3402 
3403 /// isScalarLoadToVector - Returns true if the node is a scalar load that
3404 /// is promoted to a vector. It also returns the LoadSDNode by reference if
3405 /// required.
3406 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
3407   if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
3408     return false;
3409   N = N->getOperand(0).getNode();
3410   if (!ISD::isNON_EXTLoad(N))
3411     return false;
3412   if (LD)
3413     *LD = cast<LoadSDNode>(N);
3414   return true;
3415 }
3416 
3417 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
3418 /// match movlp{s|d}. The lower half elements should come from lower half of
3419 /// V1 (and in order), and the upper half elements should come from the upper
3420 /// half of V2 (and in order). And since V1 will become the source of the
3421 /// MOVLP, it must be either a vector load or a scalar load to vector.
3422 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
3423                                ShuffleVectorSDNode *Op) {
3424   if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
3425     return false;
3426   // If V2 is a vector load, don't do this transformation. We will try to use
3427   // a load-folding shufps op instead.
3428   if (ISD::isNON_EXTLoad(V2))
3429     return false;
3430 
3431   unsigned NumElems = Op->getValueType(0).getVectorNumElements();
3432 
3433   if (NumElems != 2 && NumElems != 4)
3434     return false;
3435   for (unsigned i = 0, e = NumElems/2; i != e; ++i)
3436     if (!isUndefOrEqual(Op->getMaskElt(i), i))
3437       return false;
3438   for (unsigned i = NumElems/2; i != NumElems; ++i)
3439     if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
3440       return false;
3441   return true;
3442 }
3443 
3444 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
3445 /// all the same.
3446 static bool isSplatVector(SDNode *N) {
3447   if (N->getOpcode() != ISD::BUILD_VECTOR)
3448     return false;
3449 
3450   SDValue SplatValue = N->getOperand(0);
3451   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
3452     if (N->getOperand(i) != SplatValue)
3453       return false;
3454   return true;
3455 }
3456 
3457 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
3458 /// to a zero vector.
3459/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 3460static bool isZeroShuffle(ShuffleVectorSDNode *N) { 3461 SDValue V1 = N->getOperand(0); 3462 SDValue V2 = N->getOperand(1); 3463 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3464 for (unsigned i = 0; i != NumElems; ++i) { 3465 int Idx = N->getMaskElt(i); 3466 if (Idx >= (int)NumElems) { 3467 unsigned Opc = V2.getOpcode(); 3468 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 3469 continue; 3470 if (Opc != ISD::BUILD_VECTOR || 3471 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 3472 return false; 3473 } else if (Idx >= 0) { 3474 unsigned Opc = V1.getOpcode(); 3475 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 3476 continue; 3477 if (Opc != ISD::BUILD_VECTOR || 3478 !X86::isZeroNode(V1.getOperand(Idx))) 3479 return false; 3480 } 3481 } 3482 return true; 3483} 3484 3485/// getZeroVector - Returns a vector of specified type with all zero elements. 3486/// 3487static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG, 3488 DebugLoc dl) { 3489 assert(VT.isVector() && "Expected a vector type"); 3490 3491 // Always build SSE zero vectors as <4 x i32> bitcasted 3492 // to their dest type. This ensures they get CSE'd. 3493 SDValue Vec; 3494 if (VT.getSizeInBits() == 128) { // SSE 3495 if (HasSSE2) { // SSE2 3496 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 3497 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 3498 } else { // SSE1 3499 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 3500 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 3501 } 3502 } else if (VT.getSizeInBits() == 256) { // AVX 3503 // 256-bit logic and arithmetic instructions in AVX are 3504 // all floating-point, no support for integer ops. Default 3505 // to emitting fp zeroed vectors then. 3506 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 3507 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 3508 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 3509 } 3510 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); 3511} 3512 3513/// getOnesVector - Returns a vector of specified type with all bits set. 3514/// 3515static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3516 assert(VT.isVector() && "Expected a vector type"); 3517 3518 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 3519 // type. This ensures they get CSE'd. 3520 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 3521 SDValue Vec; 3522 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 3523 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); 3524} 3525 3526 3527/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 3528/// that point to V2 points to its first element. 
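/// For example, with NumElems = 4 the mask <0, 7, 1, 6> is rewritten to
/// <0, 4, 1, 4>: every index greater than NumElems names some element of the
/// splatted V2, so it can be canonicalized to V2's first element (index 4).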
3529static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 3530 EVT VT = SVOp->getValueType(0); 3531 unsigned NumElems = VT.getVectorNumElements(); 3532 3533 bool Changed = false; 3534 SmallVector<int, 8> MaskVec; 3535 SVOp->getMask(MaskVec); 3536 3537 for (unsigned i = 0; i != NumElems; ++i) { 3538 if (MaskVec[i] > (int)NumElems) { 3539 MaskVec[i] = NumElems; 3540 Changed = true; 3541 } 3542 } 3543 if (Changed) 3544 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0), 3545 SVOp->getOperand(1), &MaskVec[0]); 3546 return SDValue(SVOp, 0); 3547} 3548 3549/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 3550/// operation of specified width. 3551static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3552 SDValue V2) { 3553 unsigned NumElems = VT.getVectorNumElements(); 3554 SmallVector<int, 8> Mask; 3555 Mask.push_back(NumElems); 3556 for (unsigned i = 1; i != NumElems; ++i) 3557 Mask.push_back(i); 3558 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3559} 3560 3561/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 3562static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3563 SDValue V2) { 3564 unsigned NumElems = VT.getVectorNumElements(); 3565 SmallVector<int, 8> Mask; 3566 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 3567 Mask.push_back(i); 3568 Mask.push_back(i + NumElems); 3569 } 3570 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3571} 3572 3573/// getUnpackhMask - Returns a vector_shuffle node for an unpackh operation. 3574static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3575 SDValue V2) { 3576 unsigned NumElems = VT.getVectorNumElements(); 3577 unsigned Half = NumElems/2; 3578 SmallVector<int, 8> Mask; 3579 for (unsigned i = 0; i != Half; ++i) { 3580 Mask.push_back(i + Half); 3581 Mask.push_back(i + NumElems + Half); 3582 } 3583 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3584} 3585 3586/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32. 3587static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 3588 EVT PVT = MVT::v4f32; 3589 EVT VT = SV->getValueType(0); 3590 DebugLoc dl = SV->getDebugLoc(); 3591 SDValue V1 = SV->getOperand(0); 3592 int NumElems = VT.getVectorNumElements(); 3593 int EltNo = SV->getSplatIndex(); 3594 3595 // unpack elements to the correct location 3596 while (NumElems > 4) { 3597 if (EltNo < NumElems/2) { 3598 V1 = getUnpackl(DAG, dl, VT, V1, V1); 3599 } else { 3600 V1 = getUnpackh(DAG, dl, VT, V1, V1); 3601 EltNo -= NumElems/2; 3602 } 3603 NumElems >>= 1; 3604 } 3605 3606 // Perform the splat. 3607 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 3608 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); 3609 V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]); 3610 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1); 3611} 3612 3613/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 3614/// vector of zero or undef vector. This produces a shuffle where the low 3615/// element of V2 is swizzled into the zero/undef vector, landing at element 3616/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 3617static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 3618 bool isZero, bool HasSSE2, 3619 SelectionDAG &DAG) { 3620 EVT VT = V2.getValueType(); 3621 SDValue V1 = isZero 3622 ? 
getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 3623 unsigned NumElems = VT.getVectorNumElements(); 3624 SmallVector<int, 16> MaskVec; 3625 for (unsigned i = 0; i != NumElems; ++i) 3626 // If this is the insertion idx, put the low elt of V2 here. 3627 MaskVec.push_back(i == Idx ? NumElems : i); 3628 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 3629} 3630 3631/// getShuffleScalarElt - Returns the scalar element that will make up the ith 3632/// element of the result of the vector shuffle. 3633SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, 3634 unsigned Depth) { 3635 if (Depth == 6) 3636 return SDValue(); // Limit search depth. 3637 3638 SDValue V = SDValue(N, 0); 3639 EVT VT = V.getValueType(); 3640 unsigned Opcode = V.getOpcode(); 3641 3642 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 3643 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 3644 Index = SV->getMaskElt(Index); 3645 3646 if (Index < 0) 3647 return DAG.getUNDEF(VT.getVectorElementType()); 3648 3649 int NumElems = VT.getVectorNumElements(); 3650 SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1); 3651 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); 3652 } 3653 3654 // Recurse into target specific vector shuffles to find scalars. 3655 if (isTargetShuffle(Opcode)) { 3656 int NumElems = VT.getVectorNumElements(); 3657 SmallVector<unsigned, 16> ShuffleMask; 3658 SDValue ImmN; 3659 3660 switch(Opcode) { 3661 case X86ISD::SHUFPS: 3662 case X86ISD::SHUFPD: 3663 ImmN = N->getOperand(N->getNumOperands()-1); 3664 DecodeSHUFPSMask(NumElems, 3665 cast<ConstantSDNode>(ImmN)->getZExtValue(), 3666 ShuffleMask); 3667 break; 3668 case X86ISD::PUNPCKHBW: 3669 case X86ISD::PUNPCKHWD: 3670 case X86ISD::PUNPCKHDQ: 3671 case X86ISD::PUNPCKHQDQ: 3672 DecodePUNPCKHMask(NumElems, ShuffleMask); 3673 break; 3674 case X86ISD::UNPCKHPS: 3675 case X86ISD::UNPCKHPD: 3676 DecodeUNPCKHPMask(NumElems, ShuffleMask); 3677 break; 3678 case X86ISD::PUNPCKLBW: 3679 case X86ISD::PUNPCKLWD: 3680 case X86ISD::PUNPCKLDQ: 3681 case X86ISD::PUNPCKLQDQ: 3682 DecodePUNPCKLMask(NumElems, ShuffleMask); 3683 break; 3684 case X86ISD::UNPCKLPS: 3685 case X86ISD::UNPCKLPD: 3686 DecodeUNPCKLPMask(NumElems, ShuffleMask); 3687 break; 3688 case X86ISD::MOVHLPS: 3689 DecodeMOVHLPSMask(NumElems, ShuffleMask); 3690 break; 3691 case X86ISD::MOVLHPS: 3692 DecodeMOVLHPSMask(NumElems, ShuffleMask); 3693 break; 3694 case X86ISD::PSHUFD: 3695 ImmN = N->getOperand(N->getNumOperands()-1); 3696 DecodePSHUFMask(NumElems, 3697 cast<ConstantSDNode>(ImmN)->getZExtValue(), 3698 ShuffleMask); 3699 break; 3700 case X86ISD::PSHUFHW: 3701 ImmN = N->getOperand(N->getNumOperands()-1); 3702 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 3703 ShuffleMask); 3704 break; 3705 case X86ISD::PSHUFLW: 3706 ImmN = N->getOperand(N->getNumOperands()-1); 3707 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 3708 ShuffleMask); 3709 break; 3710 case X86ISD::MOVSS: 3711 case X86ISD::MOVSD: { 3712 // The index 0 always comes from the first element of the second source, 3713 // this is why MOVSS and MOVSD are used in the first place. The other 3714 // elements come from the other positions of the first source vector. 3715 unsigned OpNum = (Index == 0) ? 
1 : 0;
3716     return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG,
3717                                Depth+1);
3718   }
3719   default:
3720     assert(0 && "not implemented for target shuffle node");
3721     return SDValue();
3722   }
3723 
3724   Index = ShuffleMask[Index];
3725   if (Index < 0)
3726     return DAG.getUNDEF(VT.getVectorElementType());
3727 
3728   SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
3729   return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG,
3730                              Depth+1);
3731 }
3732 
3733   // Actual nodes that may contain scalar elements
3734   if (Opcode == ISD::BIT_CONVERT) {
3735     V = V.getOperand(0);
3736     EVT SrcVT = V.getValueType();
3737     unsigned NumElems = VT.getVectorNumElements();
3738 
3739     if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
3740       return SDValue();
3741   }
3742 
3743   if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
3744     return (Index == 0) ? V.getOperand(0)
3745                         : DAG.getUNDEF(VT.getVectorElementType());
3746 
3747   if (V.getOpcode() == ISD::BUILD_VECTOR)
3748     return V.getOperand(Index);
3749 
3750   return SDValue();
3751 }
3752 
3753 /// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
3754 /// vector shuffle operation that are zero (or undef). The search can start
3755 /// from either end, left or right.
3756 static
3757 unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,
3758                                   bool ZerosFromLeft, SelectionDAG &DAG) {
3759   int i = 0;
3760 
3761   while (i < NumElems) {
3762     unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
3763     SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0);
3764     if (!(Elt.getNode() &&
3765          (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
3766       break;
3767     ++i;
3768   }
3769 
3770   return i;
3771 }
3772 
3773 /// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to
3774 /// MaskE correspond consecutively to elements from one of the vector operands,
3775 /// starting from its index OpIdx. Also sets OpNum to that source operand.
3776 static
3777 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE,
3778                               int OpIdx, int NumElems, unsigned &OpNum) {
3779   bool SeenV1 = false;
3780   bool SeenV2 = false;
3781 
3782   for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) {
3783     int Idx = SVOp->getMaskElt(i);
3784     // Ignore undef indices
3785     if (Idx < 0)
3786       continue;
3787 
3788     if (Idx < NumElems)
3789       SeenV1 = true;
3790     else
3791       SeenV2 = true;
3792 
3793     // Only accept consecutive elements from the same vector
3794     if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
3795       return false;
3796   }
3797 
3798   OpNum = SeenV1 ? 0 : 1;
3799   return true;
3800 }
3801 
3802 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
3803 /// logical right shift of a vector.
3804 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
3805                                bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
3806   unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
3807   unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
3808               false /* check zeros from right */, DAG);
3809   unsigned OpSrc;
3810 
3811   if (!NumZeros)
3812     return false;
3813 
3814   // Considering the elements in the mask that are not consecutive zeros,
3815   // check if they consecutively come from only one of the source vectors.
3816 // 3817 // V1 = {X, A, B, C} 0 3818 // \ \ \ / 3819 // vector_shuffle V1, V2 <1, 2, 3, X> 3820 // 3821 if (!isShuffleMaskConsecutive(SVOp, 3822 0, // Mask Start Index 3823 NumElems-NumZeros-1, // Mask End Index 3824 NumZeros, // Where to start looking in the src vector 3825 NumElems, // Number of elements in vector 3826 OpSrc)) // Which source operand ? 3827 return false; 3828 3829 isLeft = false; 3830 ShAmt = NumZeros; 3831 ShVal = SVOp->getOperand(OpSrc); 3832 return true; 3833} 3834 3835/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 3836/// logical left shift of a vector. 3837static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 3838 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 3839 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 3840 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 3841 true /* check zeros from left */, DAG); 3842 unsigned OpSrc; 3843 3844 if (!NumZeros) 3845 return false; 3846 3847 // Considering the elements in the mask that are not consecutive zeros, 3848 // check if they consecutively come from only one of the source vectors. 3849 // 3850 // 0 { A, B, X, X } = V2 3851 // / \ / / 3852 // vector_shuffle V1, V2 <X, X, 4, 5> 3853 // 3854 if (!isShuffleMaskConsecutive(SVOp, 3855 NumZeros, // Mask Start Index 3856 NumElems-1, // Mask End Index 3857 0, // Where to start looking in the src vector 3858 NumElems, // Number of elements in vector 3859 OpSrc)) // Which source operand ? 3860 return false; 3861 3862 isLeft = true; 3863 ShAmt = NumZeros; 3864 ShVal = SVOp->getOperand(OpSrc); 3865 return true; 3866} 3867 3868/// isVectorShift - Returns true if the shuffle can be implemented as a 3869/// logical left or right shift of a vector. 3870static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 3871 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 3872 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 3873 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 3874 return true; 3875 3876 return false; 3877} 3878 3879/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
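/// The code below packs adjacent byte pairs into i16 insertions: each odd
/// byte is zero-extended, shifted left by 8, and OR'd with its zero-extended
/// even neighbor, so at most 8 word insertions are needed instead of 16 byte
/// insertions.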
3880/// 3881static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 3882 unsigned NumNonZero, unsigned NumZero, 3883 SelectionDAG &DAG, 3884 const TargetLowering &TLI) { 3885 if (NumNonZero > 8) 3886 return SDValue(); 3887 3888 DebugLoc dl = Op.getDebugLoc(); 3889 SDValue V(0, 0); 3890 bool First = true; 3891 for (unsigned i = 0; i < 16; ++i) { 3892 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 3893 if (ThisIsNonZero && First) { 3894 if (NumZero) 3895 V = getZeroVector(MVT::v8i16, true, DAG, dl); 3896 else 3897 V = DAG.getUNDEF(MVT::v8i16); 3898 First = false; 3899 } 3900 3901 if ((i & 1) != 0) { 3902 SDValue ThisElt(0, 0), LastElt(0, 0); 3903 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 3904 if (LastIsNonZero) { 3905 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 3906 MVT::i16, Op.getOperand(i-1)); 3907 } 3908 if (ThisIsNonZero) { 3909 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 3910 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 3911 ThisElt, DAG.getConstant(8, MVT::i8)); 3912 if (LastIsNonZero) 3913 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 3914 } else 3915 ThisElt = LastElt; 3916 3917 if (ThisElt.getNode()) 3918 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 3919 DAG.getIntPtrConstant(i/2)); 3920 } 3921 } 3922 3923 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V); 3924} 3925 3926/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 3927/// 3928static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 3929 unsigned NumNonZero, unsigned NumZero, 3930 SelectionDAG &DAG, 3931 const TargetLowering &TLI) { 3932 if (NumNonZero > 4) 3933 return SDValue(); 3934 3935 DebugLoc dl = Op.getDebugLoc(); 3936 SDValue V(0, 0); 3937 bool First = true; 3938 for (unsigned i = 0; i < 8; ++i) { 3939 bool isNonZero = (NonZeros & (1 << i)) != 0; 3940 if (isNonZero) { 3941 if (First) { 3942 if (NumZero) 3943 V = getZeroVector(MVT::v8i16, true, DAG, dl); 3944 else 3945 V = DAG.getUNDEF(MVT::v8i16); 3946 First = false; 3947 } 3948 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 3949 MVT::v8i16, V, Op.getOperand(i), 3950 DAG.getIntPtrConstant(i)); 3951 } 3952 } 3953 3954 return V; 3955} 3956 3957/// getVShift - Return a vector logical shift node. 3958/// 3959static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 3960 unsigned NumBits, SelectionDAG &DAG, 3961 const TargetLowering &TLI, DebugLoc dl) { 3962 EVT ShVT = MVT::v2i64; 3963 unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; 3964 SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp); 3965 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 3966 DAG.getNode(Opc, dl, ShVT, SrcOp, 3967 DAG.getConstant(NumBits, TLI.getShiftAmountTy()))); 3968} 3969 3970SDValue 3971X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 3972 SelectionDAG &DAG) const { 3973 3974 // Check if the scalar load can be widened into a vector load. And if 3975 // the address is "base + cst" see if the cst can be "absorbed" into 3976 // the shuffle mask. 
3977 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 3978 SDValue Ptr = LD->getBasePtr(); 3979 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 3980 return SDValue(); 3981 EVT PVT = LD->getValueType(0); 3982 if (PVT != MVT::i32 && PVT != MVT::f32) 3983 return SDValue(); 3984 3985 int FI = -1; 3986 int64_t Offset = 0; 3987 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 3988 FI = FINode->getIndex(); 3989 Offset = 0; 3990 } else if (Ptr.getOpcode() == ISD::ADD && 3991 isa<ConstantSDNode>(Ptr.getOperand(1)) && 3992 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 3993 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 3994 Offset = Ptr.getConstantOperandVal(1); 3995 Ptr = Ptr.getOperand(0); 3996 } else { 3997 return SDValue(); 3998 } 3999 4000 SDValue Chain = LD->getChain(); 4001 // Make sure the stack object alignment is at least 16. 4002 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4003 if (DAG.InferPtrAlignment(Ptr) < 16) { 4004 if (MFI->isFixedObjectIndex(FI)) { 4005 // Can't change the alignment. FIXME: It's possible to compute 4006 // the exact stack offset and reference FI + adjust offset instead. 4007 // If someone *really* cares about this. That's the way to implement it. 4008 return SDValue(); 4009 } else { 4010 MFI->setObjectAlignment(FI, 16); 4011 } 4012 } 4013 4014 // (Offset % 16) must be multiple of 4. Then address is then 4015 // Ptr + (Offset & ~15). 4016 if (Offset < 0) 4017 return SDValue(); 4018 if ((Offset % 16) & 3) 4019 return SDValue(); 4020 int64_t StartOffset = Offset & ~15; 4021 if (StartOffset) 4022 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4023 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4024 4025 int EltNo = (Offset - StartOffset) >> 2; 4026 int Mask[4] = { EltNo, EltNo, EltNo, EltNo }; 4027 EVT VT = (PVT == MVT::i32) ? MVT::v4i32 : MVT::v4f32; 4028 SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr, 4029 LD->getPointerInfo().getWithOffset(StartOffset), 4030 false, false, 0); 4031 // Canonicalize it to a v4i32 shuffle. 4032 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, V1); 4033 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 4034 DAG.getVectorShuffle(MVT::v4i32, dl, V1, 4035 DAG.getUNDEF(MVT::v4i32),&Mask[0])); 4036 } 4037 4038 return SDValue(); 4039} 4040 4041/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4042/// vector of type 'VT', see if the elements can be replaced by a single large 4043/// load which has the same value as a build_vector whose operands are 'elts'. 4044/// 4045/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4046/// 4047/// FIXME: we'd also like to handle the case where the last elements are zero 4048/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4049/// There's even a handy isZeroNode for that purpose. 4050static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4051 DebugLoc &DL, SelectionDAG &DAG) { 4052 EVT EltVT = VT.getVectorElementType(); 4053 unsigned NumElems = Elts.size(); 4054 4055 LoadSDNode *LDBase = NULL; 4056 unsigned LastLoadedElt = -1U; 4057 4058 // For each element in the initializer, see if we've found a load or an undef. 4059 // If we don't find an initial load element, or later load elements are 4060 // non-consecutive, bail out. 
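// For example, <load a, load a+4, undef, undef> for v4i32 records LDBase == a
// and LastLoadedElt == 1, which the code below turns into a single
// vzext_load, matching the example in the comment above.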
4061 for (unsigned i = 0; i < NumElems; ++i) { 4062 SDValue Elt = Elts[i]; 4063 4064 if (!Elt.getNode() || 4065 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4066 return SDValue(); 4067 if (!LDBase) { 4068 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4069 return SDValue(); 4070 LDBase = cast<LoadSDNode>(Elt.getNode()); 4071 LastLoadedElt = i; 4072 continue; 4073 } 4074 if (Elt.getOpcode() == ISD::UNDEF) 4075 continue; 4076 4077 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4078 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4079 return SDValue(); 4080 LastLoadedElt = i; 4081 } 4082 4083 // If we have found an entire vector of loads and undefs, then return a large 4084 // load of the entire vector width starting at the base pointer. If we found 4085 // consecutive loads for the low half, generate a vzext_load node. 4086 if (LastLoadedElt == NumElems - 1) { 4087 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4088 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4089 LDBase->getPointerInfo(), 4090 LDBase->isVolatile(), LDBase->isNonTemporal(), 0); 4091 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4092 LDBase->getPointerInfo(), 4093 LDBase->isVolatile(), LDBase->isNonTemporal(), 4094 LDBase->getAlignment()); 4095 } else if (NumElems == 4 && LastLoadedElt == 1) { 4096 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4097 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4098 SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, 4099 Ops, 2, MVT::i32, 4100 LDBase->getMemOperand()); 4101 return DAG.getNode(ISD::BIT_CONVERT, DL, VT, ResNode); 4102 } 4103 return SDValue(); 4104} 4105 4106SDValue 4107X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 4108 DebugLoc dl = Op.getDebugLoc(); 4109 // All zero's are handled with pxor in SSE2 and above, xorps in SSE1. 4110 // All one's are handled with pcmpeqd. In AVX, zero's are handled with 4111 // vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256 version of pcmpeqd 4112 // is present, so AllOnes is ignored. 4113 if (ISD::isBuildVectorAllZeros(Op.getNode()) || 4114 (Op.getValueType().getSizeInBits() != 256 && 4115 ISD::isBuildVectorAllOnes(Op.getNode()))) { 4116 // Canonicalize this to <4 x i32> (SSE) to 4117 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 4118 // eliminated on x86-32 hosts. 4119 if (Op.getValueType() == MVT::v4i32) 4120 return Op; 4121 4122 if (ISD::isBuildVectorAllOnes(Op.getNode())) 4123 return getOnesVector(Op.getValueType(), DAG, dl); 4124 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl); 4125 } 4126 4127 EVT VT = Op.getValueType(); 4128 EVT ExtVT = VT.getVectorElementType(); 4129 unsigned EVTBits = ExtVT.getSizeInBits(); 4130 4131 unsigned NumElems = Op.getNumOperands(); 4132 unsigned NumZero = 0; 4133 unsigned NumNonZero = 0; 4134 unsigned NonZeros = 0; 4135 bool IsAllConstants = true; 4136 SmallSet<SDValue, 8> Values; 4137 for (unsigned i = 0; i < NumElems; ++i) { 4138 SDValue Elt = Op.getOperand(i); 4139 if (Elt.getOpcode() == ISD::UNDEF) 4140 continue; 4141 Values.insert(Elt); 4142 if (Elt.getOpcode() != ISD::Constant && 4143 Elt.getOpcode() != ISD::ConstantFP) 4144 IsAllConstants = false; 4145 if (X86::isZeroNode(Elt)) 4146 NumZero++; 4147 else { 4148 NonZeros |= (1 << i); 4149 NumNonZero++; 4150 } 4151 } 4152 4153 // All undef vector. Return an UNDEF. All zero vectors were handled above. 
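// For example, build_vector <undef, x, undef, undef> with x non-zero leaves
// NumNonZero == 1 and NonZeros == 0b0010, so the Idx computed below is
// CountTrailingZeros_32(NonZeros) == 1.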
4154 if (NumNonZero == 0) 4155 return DAG.getUNDEF(VT); 4156 4157 // Special case for single non-zero, non-undef, element. 4158 if (NumNonZero == 1) { 4159 unsigned Idx = CountTrailingZeros_32(NonZeros); 4160 SDValue Item = Op.getOperand(Idx); 4161 4162 // If this is an insertion of an i64 value on x86-32, and if the top bits of 4163 // the value are obviously zero, truncate the value to i32 and do the 4164 // insertion that way. Only do this if the value is non-constant or if the 4165 // value is a constant being inserted into element 0. It is cheaper to do 4166 // a constant pool load than it is to do a movd + shuffle. 4167 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 4168 (!IsAllConstants || Idx == 0)) { 4169 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 4170 // Handle SSE only. 4171 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 4172 EVT VecVT = MVT::v4i32; 4173 unsigned VecElts = 4; 4174 4175 // Truncate the value (which may itself be a constant) to i32, and 4176 // convert it to a vector with movd (S2V+shuffle to zero extend). 4177 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 4178 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 4179 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4180 Subtarget->hasSSE2(), DAG); 4181 4182 // Now we have our 32-bit value zero extended in the low element of 4183 // a vector. If Idx != 0, swizzle it into place. 4184 if (Idx != 0) { 4185 SmallVector<int, 4> Mask; 4186 Mask.push_back(Idx); 4187 for (unsigned i = 1; i != VecElts; ++i) 4188 Mask.push_back(i); 4189 Item = DAG.getVectorShuffle(VecVT, dl, Item, 4190 DAG.getUNDEF(Item.getValueType()), 4191 &Mask[0]); 4192 } 4193 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item); 4194 } 4195 } 4196 4197 // If we have a constant or non-constant insertion into the low element of 4198 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 4199 // the rest of the elements. This will be matched as movd/movq/movss/movsd 4200 // depending on what the source datatype is. 4201 if (Idx == 0) { 4202 if (NumZero == 0) { 4203 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4204 } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 4205 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 4206 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4207 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 4208 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(), 4209 DAG); 4210 } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 4211 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 4212 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 4213 EVT MiddleVT = MVT::v4i32; 4214 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item); 4215 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4216 Subtarget->hasSSE2(), DAG); 4217 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item); 4218 } 4219 } 4220 4221 // Is it a vector logical left shift? 4222 if (NumElems == 2 && Idx == 1 && 4223 X86::isZeroNode(Op.getOperand(0)) && 4224 !X86::isZeroNode(Op.getOperand(1))) { 4225 unsigned NumBits = VT.getSizeInBits(); 4226 return getVShift(true, VT, 4227 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 4228 VT, Op.getOperand(1)), 4229 NumBits/2, DAG, *this, dl); 4230 } 4231 4232 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 
4233 return SDValue(); 4234 4235 // Otherwise, if this is a vector with i32 or f32 elements, and the element 4236 // is a non-constant being inserted into an element other than the low one, 4237 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 4238 // movd/movss) to move this into the low element, then shuffle it into 4239 // place. 4240 if (EVTBits == 32) { 4241 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4242 4243 // Turn it into a shuffle of zero and zero-extended scalar to vector. 4244 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, 4245 Subtarget->hasSSE2(), DAG); 4246 SmallVector<int, 8> MaskVec; 4247 for (unsigned i = 0; i < NumElems; i++) 4248 MaskVec.push_back(i == Idx ? 0 : 1); 4249 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 4250 } 4251 } 4252 4253 // Splat is obviously ok. Let legalizer expand it to a shuffle. 4254 if (Values.size() == 1) { 4255 if (EVTBits == 32) { 4256 // Instead of a shuffle like this: 4257 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 4258 // Check if it's possible to issue this instead. 4259 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 4260 unsigned Idx = CountTrailingZeros_32(NonZeros); 4261 SDValue Item = Op.getOperand(Idx); 4262 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 4263 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 4264 } 4265 return SDValue(); 4266 } 4267 4268 // A vector full of immediates; various special cases are already 4269 // handled, so this is best done with a single constant-pool load. 4270 if (IsAllConstants) 4271 return SDValue(); 4272 4273 // Let legalizer expand 2-wide build_vectors. 4274 if (EVTBits == 64) { 4275 if (NumNonZero == 1) { 4276 // One half is zero or undef. 4277 unsigned Idx = CountTrailingZeros_32(NonZeros); 4278 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 4279 Op.getOperand(Idx)); 4280 return getShuffleVectorZeroOrUndef(V2, Idx, true, 4281 Subtarget->hasSSE2(), DAG); 4282 } 4283 return SDValue(); 4284 } 4285 4286 // If element VT is < 32 bits, convert it to inserts into a zero vector. 4287 if (EVTBits == 8 && NumElems == 16) { 4288 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 4289 *this); 4290 if (V.getNode()) return V; 4291 } 4292 4293 if (EVTBits == 16 && NumElems == 8) { 4294 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 4295 *this); 4296 if (V.getNode()) return V; 4297 } 4298 4299 // If element VT is == 32 bits, turn it into a number of shuffles. 4300 SmallVector<SDValue, 8> V; 4301 V.resize(NumElems); 4302 if (NumElems == 4 && NumZero > 0) { 4303 for (unsigned i = 0; i < 4; ++i) { 4304 bool isZero = !(NonZeros & (1 << i)); 4305 if (isZero) 4306 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 4307 else 4308 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4309 } 4310 4311 for (unsigned i = 0; i < 2; ++i) { 4312 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 4313 default: break; 4314 case 0: 4315 V[i] = V[i*2]; // Must be a zero vector. 4316 break; 4317 case 1: 4318 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 4319 break; 4320 case 2: 4321 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 4322 break; 4323 case 3: 4324 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 4325 break; 4326 } 4327 } 4328 4329 SmallVector<int, 8> MaskVec; 4330 bool Reverse = (NonZeros & 0x3) == 2; 4331 for (unsigned i = 0; i < 2; ++i) 4332 MaskVec.push_back(Reverse ? 
1-i : i); 4333 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 4334 for (unsigned i = 0; i < 2; ++i) 4335 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems); 4336 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 4337 } 4338 4339 if (Values.size() > 1 && VT.getSizeInBits() == 128) { 4340 // Check for a build vector of consecutive loads. 4341 for (unsigned i = 0; i < NumElems; ++i) 4342 V[i] = Op.getOperand(i); 4343 4344 // Check for elements which are consecutive loads. 4345 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 4346 if (LD.getNode()) 4347 return LD; 4348 4349 // For SSE 4.1, use insertps to put the high elements into the low element. 4350 if (getSubtarget()->hasSSE41()) { 4351 SDValue Result; 4352 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 4353 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 4354 else 4355 Result = DAG.getUNDEF(VT); 4356 4357 for (unsigned i = 1; i < NumElems; ++i) { 4358 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 4359 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 4360 Op.getOperand(i), DAG.getIntPtrConstant(i)); 4361 } 4362 return Result; 4363 } 4364 4365 // Otherwise, expand into a number of unpckl*, start by extending each of 4366 // our (non-undef) elements to the full vector width with the element in the 4367 // bottom slot of the vector (which generates no code for SSE). 4368 for (unsigned i = 0; i < NumElems; ++i) { 4369 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 4370 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4371 else 4372 V[i] = DAG.getUNDEF(VT); 4373 } 4374 4375 // Next, we iteratively mix elements, e.g. for v4f32: 4376 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 4377 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 4378 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 4379 unsigned EltStride = NumElems >> 1; 4380 while (EltStride != 0) { 4381 for (unsigned i = 0; i < EltStride; ++i) { 4382 // If V[i+EltStride] is undef and this is the first round of mixing, 4383 // then it is safe to just drop this shuffle: V[i] is already in the 4384 // right place, the one element (since it's the first round) being 4385 // inserted as undef can be dropped. This isn't safe for successive 4386 // rounds because they will permute elements within both vectors. 4387 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 4388 EltStride == NumElems/2) 4389 continue; 4390 4391 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 4392 } 4393 EltStride >>= 1; 4394 } 4395 return V[0]; 4396 } 4397 return SDValue(); 4398} 4399 4400SDValue 4401X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { 4402 // We support concatenate two MMX registers and place them in a MMX 4403 // register. This is better than doing a stack convert. 
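// For example, concatenating two v1i64 operands into a v2i64 result moves
// each MMX value into an XMM register with MOVQ2DQ and then, unless the
// second operand is a scalar_to_vector, combines the two with the <0, 2>
// shuffle built below.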
4404 DebugLoc dl = Op.getDebugLoc(); 4405 EVT ResVT = Op.getValueType(); 4406 assert(Op.getNumOperands() == 2); 4407 assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 || 4408 ResVT == MVT::v8i16 || ResVT == MVT::v16i8); 4409 int Mask[2]; 4410 SDValue InVec = DAG.getNode(ISD::BIT_CONVERT,dl, MVT::v1i64, Op.getOperand(0)); 4411 SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 4412 InVec = Op.getOperand(1); 4413 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4414 unsigned NumElts = ResVT.getVectorNumElements(); 4415 VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp); 4416 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp, 4417 InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1)); 4418 } else { 4419 InVec = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v1i64, InVec); 4420 SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 4421 Mask[0] = 0; Mask[1] = 2; 4422 VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask); 4423 } 4424 return DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp); 4425} 4426 4427// v8i16 shuffles - Prefer shuffles in the following order: 4428// 1. [all] pshuflw, pshufhw, optional move 4429// 2. [ssse3] 1 x pshufb 4430// 3. [ssse3] 2 x pshufb + 1 x por 4431// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 4432SDValue 4433X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, 4434 SelectionDAG &DAG) const { 4435 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 4436 SDValue V1 = SVOp->getOperand(0); 4437 SDValue V2 = SVOp->getOperand(1); 4438 DebugLoc dl = SVOp->getDebugLoc(); 4439 SmallVector<int, 8> MaskVals; 4440 4441 // Determine if more than 1 of the words in each of the low and high quadwords 4442 // of the result come from the same quadword of one of the two inputs. Undef 4443 // mask values count as coming from any quadword, for better codegen. 4444 SmallVector<unsigned, 4> LoQuad(4); 4445 SmallVector<unsigned, 4> HiQuad(4); 4446 BitVector InputQuads(4); 4447 for (unsigned i = 0; i < 8; ++i) { 4448 SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad; 4449 int EltIdx = SVOp->getMaskElt(i); 4450 MaskVals.push_back(EltIdx); 4451 if (EltIdx < 0) { 4452 ++Quad[0]; 4453 ++Quad[1]; 4454 ++Quad[2]; 4455 ++Quad[3]; 4456 continue; 4457 } 4458 ++Quad[EltIdx / 4]; 4459 InputQuads.set(EltIdx / 4); 4460 } 4461 4462 int BestLoQuad = -1; 4463 unsigned MaxQuad = 1; 4464 for (unsigned i = 0; i < 4; ++i) { 4465 if (LoQuad[i] > MaxQuad) { 4466 BestLoQuad = i; 4467 MaxQuad = LoQuad[i]; 4468 } 4469 } 4470 4471 int BestHiQuad = -1; 4472 MaxQuad = 1; 4473 for (unsigned i = 0; i < 4; ++i) { 4474 if (HiQuad[i] > MaxQuad) { 4475 BestHiQuad = i; 4476 MaxQuad = HiQuad[i]; 4477 } 4478 } 4479 4480 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 4481 // of the two input vectors, shuffle them into one input vector so only a 4482 // single pshufb instruction is necessary. If There are more than 2 input 4483 // quads, disable the next transformation since it does not help SSSE3. 4484 bool V1Used = InputQuads[0] || InputQuads[1]; 4485 bool V2Used = InputQuads[2] || InputQuads[3]; 4486 if (Subtarget->hasSSSE3()) { 4487 if (InputQuads.count() == 2 && V1Used && V2Used) { 4488 BestLoQuad = InputQuads.find_first(); 4489 BestHiQuad = InputQuads.find_next(BestLoQuad); 4490 } 4491 if (InputQuads.count() > 2) { 4492 BestLoQuad = -1; 4493 BestHiQuad = -1; 4494 } 4495 } 4496 4497 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 4498 // the shuffle mask. 
If a quad is scored as -1, that means that it contains 4499 // words from all 4 input quadwords. 4500 SDValue NewV; 4501 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 4502 SmallVector<int, 8> MaskV; 4503 MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad); 4504 MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad); 4505 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 4506 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1), 4507 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]); 4508 NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV); 4509 4510 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 4511 // source words for the shuffle, to aid later transformations. 4512 bool AllWordsInNewV = true; 4513 bool InOrder[2] = { true, true }; 4514 for (unsigned i = 0; i != 8; ++i) { 4515 int idx = MaskVals[i]; 4516 if (idx != (int)i) 4517 InOrder[i/4] = false; 4518 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 4519 continue; 4520 AllWordsInNewV = false; 4521 break; 4522 } 4523 4524 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 4525 if (AllWordsInNewV) { 4526 for (int i = 0; i != 8; ++i) { 4527 int idx = MaskVals[i]; 4528 if (idx < 0) 4529 continue; 4530 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 4531 if ((idx != i) && idx < 4) 4532 pshufhw = false; 4533 if ((idx != i) && idx > 3) 4534 pshuflw = false; 4535 } 4536 V1 = NewV; 4537 V2Used = false; 4538 BestLoQuad = 0; 4539 BestHiQuad = 1; 4540 } 4541 4542 // If we've eliminated the use of V2, and the new mask is a pshuflw or 4543 // pshufhw, that's as cheap as it gets. Return the new shuffle. 4544 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 4545 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 4546 unsigned TargetMask = 0; 4547 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 4548 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 4549 TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()): 4550 X86::getShufflePSHUFLWImmediate(NewV.getNode()); 4551 V1 = NewV.getOperand(0); 4552 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 4553 } 4554 } 4555 4556 // If we have SSSE3, and all words of the result are from 1 input vector, 4557 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 4558 // is present, fall back to case 4. 4559 if (Subtarget->hasSSSE3()) { 4560 SmallVector<SDValue,16> pshufbMask; 4561 4562 // If we have elements from both input vectors, set the high bit of the 4563 // shuffle mask element to zero out elements that come from V2 in the V1 4564 // mask, and elements that come from V1 in the V2 mask, so that the two 4565 // results can be OR'd together. 4566 bool TwoInputs = V1Used && V2Used; 4567 for (unsigned i = 0; i != 8; ++i) { 4568 int EltIdx = MaskVals[i] * 2; 4569 if (TwoInputs && (EltIdx >= 16)) { 4570 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4571 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4572 continue; 4573 } 4574 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 4575 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); 4576 } 4577 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1); 4578 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 4579 DAG.getNode(ISD::BUILD_VECTOR, dl, 4580 MVT::v16i8, &pshufbMask[0], 16)); 4581 if (!TwoInputs) 4582 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); 4583 4584 // Calculate the shuffle mask for the second input, shuffle it, and 4585 // OR it with the first shuffled input. 
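// Worked example (illustrative, not part of the original source): for
// MaskVals <0,8,1,9,2,10,3,11>, words 0-3 come from V1 and words 8-11 from
// V2, so the first pshufb mask above is <0,1, 0x80,0x80, 2,3, 0x80,0x80, ...>
// (0x80 zeroes a byte) and the second mask built below is
// <0x80,0x80, 0,1, 0x80,0x80, 2,3, ...>; OR'ing the two shuffled results
// interleaves the low words of V1 and V2.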
4586 pshufbMask.clear(); 4587 for (unsigned i = 0; i != 8; ++i) { 4588 int EltIdx = MaskVals[i] * 2; 4589 if (EltIdx < 16) { 4590 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4591 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4592 continue; 4593 } 4594 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 4595 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); 4596 } 4597 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2); 4598 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 4599 DAG.getNode(ISD::BUILD_VECTOR, dl, 4600 MVT::v16i8, &pshufbMask[0], 16)); 4601 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 4602 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); 4603 } 4604 4605 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 4606 // and update MaskVals with new element order. 4607 BitVector InOrder(8); 4608 if (BestLoQuad >= 0) { 4609 SmallVector<int, 8> MaskV; 4610 for (int i = 0; i != 4; ++i) { 4611 int idx = MaskVals[i]; 4612 if (idx < 0) { 4613 MaskV.push_back(-1); 4614 InOrder.set(i); 4615 } else if ((idx / 4) == BestLoQuad) { 4616 MaskV.push_back(idx & 3); 4617 InOrder.set(i); 4618 } else { 4619 MaskV.push_back(-1); 4620 } 4621 } 4622 for (unsigned i = 4; i != 8; ++i) 4623 MaskV.push_back(i); 4624 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 4625 &MaskV[0]); 4626 4627 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 4628 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 4629 NewV.getOperand(0), 4630 X86::getShufflePSHUFLWImmediate(NewV.getNode()), 4631 DAG); 4632 } 4633 4634 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 4635 // and update MaskVals with the new element order. 4636 if (BestHiQuad >= 0) { 4637 SmallVector<int, 8> MaskV; 4638 for (unsigned i = 0; i != 4; ++i) 4639 MaskV.push_back(i); 4640 for (unsigned i = 4; i != 8; ++i) { 4641 int idx = MaskVals[i]; 4642 if (idx < 0) { 4643 MaskV.push_back(-1); 4644 InOrder.set(i); 4645 } else if ((idx / 4) == BestHiQuad) { 4646 MaskV.push_back((idx & 3) + 4); 4647 InOrder.set(i); 4648 } else { 4649 MaskV.push_back(-1); 4650 } 4651 } 4652 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 4653 &MaskV[0]); 4654 4655 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 4656 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 4657 NewV.getOperand(0), 4658 X86::getShufflePSHUFHWImmediate(NewV.getNode()), 4659 DAG); 4660 } 4661 4662 // In case BestHi & BestLo were both -1, which means each quadword has a word 4663 // from each of the four input quadwords, calculate the InOrder bitvector now 4664 // before falling through to the insert/extract cleanup. 4665 if (BestLoQuad == -1 && BestHiQuad == -1) { 4666 NewV = V1; 4667 for (int i = 0; i != 8; ++i) 4668 if (MaskVals[i] < 0 || MaskVals[i] == i) 4669 InOrder.set(i); 4670 } 4671 4672 // The other elements are put in the right place using pextrw and pinsrw. 4673 for (unsigned i = 0; i != 8; ++i) { 4674 if (InOrder[i]) 4675 continue; 4676 int EltIdx = MaskVals[i]; 4677 if (EltIdx < 0) 4678 continue; 4679 SDValue ExtOp = (EltIdx < 8) 4680 ? 
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
4681 DAG.getIntPtrConstant(EltIdx))
4682 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
4683 DAG.getIntPtrConstant(EltIdx - 8));
4684 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
4685 DAG.getIntPtrConstant(i));
4686 }
4687 return NewV;
4688 }
4689
4690 // v16i8 shuffles - Prefer shuffles in the following order:
4691 // 1. [ssse3] 1 x pshufb
4692 // 2. [ssse3] 2 x pshufb + 1 x por
4693 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
4694 static
4695 SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
4696 SelectionDAG &DAG,
4697 const X86TargetLowering &TLI) {
4698 SDValue V1 = SVOp->getOperand(0);
4699 SDValue V2 = SVOp->getOperand(1);
4700 DebugLoc dl = SVOp->getDebugLoc();
4701 SmallVector<int, 16> MaskVals;
4702 SVOp->getMask(MaskVals);
4703
4704 // If we have SSSE3, case 1 is generated when all result bytes come from
4705 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
4706 // present, fall back to case 3.
4707 // FIXME: kill V2Only once shuffles are canonicalized by getNode.
4708 bool V1Only = true;
4709 bool V2Only = true;
4710 for (unsigned i = 0; i < 16; ++i) {
4711 int EltIdx = MaskVals[i];
4712 if (EltIdx < 0)
4713 continue;
4714 if (EltIdx < 16)
4715 V2Only = false;
4716 else
4717 V1Only = false;
4718 }
4719
4720 // If we have SSSE3, use one pshufb per input vector that contributes elements to the result.
4721 if (TLI.getSubtarget()->hasSSSE3()) {
4722 SmallVector<SDValue,16> pshufbMask;
4723
4724 // If all result elements are from one input vector, then only translate
4725 // undef mask values to 0x80 (zero out result) in the pshufb mask.
4726 //
4727 // Otherwise, we have elements from both input vectors, and must zero out
4728 // elements that come from V2 in the first mask, and V1 in the second mask
4729 // so that we can OR them together.
4730 bool TwoInputs = !(V1Only || V2Only);
4731 for (unsigned i = 0; i != 16; ++i) {
4732 int EltIdx = MaskVals[i];
4733 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
4734 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
4735 continue;
4736 }
4737 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
4738 }
4739 // If all the elements are from V2, assign it to V1 and return after
4740 // building the first pshufb.
4741 if (V2Only)
4742 V1 = V2;
4743 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
4744 DAG.getNode(ISD::BUILD_VECTOR, dl,
4745 MVT::v16i8, &pshufbMask[0], 16));
4746 if (!TwoInputs)
4747 return V1;
4748
4749 // Calculate the shuffle mask for the second input, shuffle it, and
4750 // OR it with the first shuffled input.
4751 pshufbMask.clear();
4752 for (unsigned i = 0; i != 16; ++i) {
4753 int EltIdx = MaskVals[i];
4754 if (EltIdx < 16) {
4755 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
4756 continue;
4757 }
4758 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
4759 }
4760 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
4761 DAG.getNode(ISD::BUILD_VECTOR, dl,
4762 MVT::v16i8, &pshufbMask[0], 16));
4763 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
4764 }
4765
4766 // No SSSE3 - Calculate in-place words and then fix all out-of-place words
4767 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
4768 // the 16 different words that comprise the two doublequadword input vectors.
4769 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
4770 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
4771 SDValue NewV = V2Only ?
V2 : V1;
4772 for (int i = 0; i != 8; ++i) {
4773 int Elt0 = MaskVals[i*2];
4774 int Elt1 = MaskVals[i*2+1];
4775
4776 // This word of the result is all undef, skip it.
4777 if (Elt0 < 0 && Elt1 < 0)
4778 continue;
4779
4780 // This word of the result is already in the correct place, skip it.
4781 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
4782 continue;
4783 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
4784 continue;
4785
4786 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
4787 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
4788 SDValue InsElt;
4789
4790 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
4791 // together using a single extract, extract the word once and insert it.
4792 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
4793 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
4794 DAG.getIntPtrConstant(Elt1 / 2));
4795 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
4796 DAG.getIntPtrConstant(i));
4797 continue;
4798 }
4799
4800 // If Elt1 is defined, extract it from the appropriate source. If the
4801 // source byte is not also odd, shift the extracted word left 8 bits;
4802 // otherwise, clear the bottom 8 bits if we need to do an or.
4803 if (Elt1 >= 0) {
4804 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
4805 DAG.getIntPtrConstant(Elt1 / 2));
4806 if ((Elt1 & 1) == 0)
4807 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
4808 DAG.getConstant(8, TLI.getShiftAmountTy()));
4809 else if (Elt0 >= 0)
4810 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
4811 DAG.getConstant(0xFF00, MVT::i16));
4812 }
4813 // If Elt0 is defined, extract it from the appropriate source. If the
4814 // source byte is not also even, shift the extracted word right 8 bits. If
4815 // Elt1 was also defined, OR the extracted values together before
4816 // inserting them in the result.
4817 if (Elt0 >= 0) {
4818 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
4819 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
4820 if ((Elt0 & 1) != 0)
4821 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
4822 DAG.getConstant(8, TLI.getShiftAmountTy()));
4823 else if (Elt1 >= 0)
4824 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
4825 DAG.getConstant(0x00FF, MVT::i16));
4826 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
4827 : InsElt0;
4828 }
4829 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
4830 DAG.getIntPtrConstant(i));
4831 }
4832 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
4833 }
4834
4835 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
4836 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
4837 /// done when every pair / quad of shuffle mask elements points to elements in
4838 /// the right sequence. e.g.
4839 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
4840 static
4841 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
4842 SelectionDAG &DAG, DebugLoc dl) {
4843 EVT VT = SVOp->getValueType(0);
4844 SDValue V1 = SVOp->getOperand(0);
4845 SDValue V2 = SVOp->getOperand(1);
4846 unsigned NumElems = VT.getVectorNumElements();
4847 unsigned NewWidth = (NumElems == 4) ?
2 : 4;
4848 EVT NewVT;
4849 switch (VT.getSimpleVT().SimpleTy) {
4850 default: assert(false && "Unexpected!");
4851 case MVT::v4f32: NewVT = MVT::v2f64; break;
4852 case MVT::v4i32: NewVT = MVT::v2i64; break;
4853 case MVT::v8i16: NewVT = MVT::v4i32; break;
4854 case MVT::v16i8: NewVT = MVT::v4i32; break;
4855 }
4856
4857 int Scale = NumElems / NewWidth;
4858 SmallVector<int, 8> MaskVec;
4859 for (unsigned i = 0; i < NumElems; i += Scale) {
4860 int StartIdx = -1;
4861 for (int j = 0; j < Scale; ++j) {
4862 int EltIdx = SVOp->getMaskElt(i+j);
4863 if (EltIdx < 0)
4864 continue;
4865 if (StartIdx == -1)
4866 StartIdx = EltIdx - (EltIdx % Scale);
4867 if (EltIdx != StartIdx + j)
4868 return SDValue();
4869 }
4870 if (StartIdx == -1)
4871 MaskVec.push_back(-1);
4872 else
4873 MaskVec.push_back(StartIdx / Scale);
4874 }
4875
4876 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
4877 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
4878 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
4879 }
4880
4881 /// getVZextMovL - Return a zero-extending vector move low node.
4882 ///
4883 static SDValue getVZextMovL(EVT VT, EVT OpVT,
4884 SDValue SrcOp, SelectionDAG &DAG,
4885 const X86Subtarget *Subtarget, DebugLoc dl) {
4886 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
4887 LoadSDNode *LD = NULL;
4888 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
4889 LD = dyn_cast<LoadSDNode>(SrcOp);
4890 if (!LD) {
4891 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
4892 // instead.
4893 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
4894 if ((ExtVT.SimpleTy != MVT::i64 || Subtarget->is64Bit()) &&
4895 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
4896 SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
4897 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
4898 // PR2108
4899 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
4900 return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
4901 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
4902 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
4903 OpVT,
4904 SrcOp.getOperand(0)
4905 .getOperand(0))));
4906 }
4907 }
4908 }
4909
4910 return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
4911 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
4912 DAG.getNode(ISD::BIT_CONVERT, dl,
4913 OpVT, SrcOp)));
4914 }
4915
4916 /// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of
4917 /// shuffles.
4918 static SDValue
4919 LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
4920 SDValue V1 = SVOp->getOperand(0);
4921 SDValue V2 = SVOp->getOperand(1);
4922 DebugLoc dl = SVOp->getDebugLoc();
4923 EVT VT = SVOp->getValueType(0);
4924
4925 SmallVector<std::pair<int, int>, 8> Locs;
4926 Locs.resize(4);
4927 SmallVector<int, 8> Mask1(4U, -1);
4928 SmallVector<int, 8> PermMask;
4929 SVOp->getMask(PermMask);
4930
4931 unsigned NumHi = 0;
4932 unsigned NumLo = 0;
4933 for (unsigned i = 0; i != 4; ++i) {
4934 int Idx = PermMask[i];
4935 if (Idx < 0) {
4936 Locs[i] = std::make_pair(-1, -1);
4937 } else {
4938 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
4939 if (Idx < 4) {
4940 Locs[i] = std::make_pair(0, NumLo);
4941 Mask1[NumLo] = Idx;
4942 NumLo++;
4943 } else {
4944 Locs[i] = std::make_pair(1, NumHi);
4945 if (2+NumHi < 4)
4946 Mask1[2+NumHi] = Idx;
4947 NumHi++;
4948 }
4949 }
4950 }
4951
4952 if (NumLo <= 2 && NumHi <= 2) {
4953 // If no more than two elements come from either vector, this can be
4954 // implemented with two shuffles. The first shuffle gathers the elements.
4955 // The second shuffle, which takes the first shuffle as both of its
4956 // vector operands, puts the elements into the right order.
4957 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
4958
4959 SmallVector<int, 8> Mask2(4U, -1);
4960
4961 for (unsigned i = 0; i != 4; ++i) {
4962 if (Locs[i].first == -1)
4963 continue;
4964 else {
4965 unsigned Idx = (i < 2) ? 0 : 4;
4966 Idx += Locs[i].first * 2 + Locs[i].second;
4967 Mask2[i] = Idx;
4968 }
4969 }
4970
4971 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
4972 } else if (NumLo == 3 || NumHi == 3) {
4973 // Otherwise, we must have three elements from one vector, call it X, and
4974 // one element from the other, call it Y. First, use a shufps to build an
4975 // intermediate vector with the one element from Y and the element from X
4976 // that will be in the same half of the final destination (the indexes don't
4977 // matter). Then, use a shufps to build the final vector, taking the half
4978 // containing the element from Y from the intermediate, and the other half
4979 // from X.
4980 if (NumHi == 3) {
4981 // Normalize it so the 3 elements come from V1.
4982 CommuteVectorShuffleMask(PermMask, VT);
4983 std::swap(V1, V2);
4984 }
4985
4986 // Find the element from V2.
4987 unsigned HiIndex;
4988 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
4989 int Val = PermMask[HiIndex];
4990 if (Val < 0)
4991 continue;
4992 if (Val >= 4)
4993 break;
4994 }
4995
4996 Mask1[0] = PermMask[HiIndex];
4997 Mask1[1] = -1;
4998 Mask1[2] = PermMask[HiIndex^1];
4999 Mask1[3] = -1;
5000 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
5001
5002 if (HiIndex >= 2) {
5003 Mask1[0] = PermMask[0];
5004 Mask1[1] = PermMask[1];
5005 Mask1[2] = HiIndex & 1 ? 6 : 4;
5006 Mask1[3] = HiIndex & 1 ? 4 : 6;
5007 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
5008 } else {
5009 Mask1[0] = HiIndex & 1 ? 2 : 0;
5010 Mask1[1] = HiIndex & 1 ? 0 : 2;
5011 Mask1[2] = PermMask[2];
5012 Mask1[3] = PermMask[3];
5013 if (Mask1[2] >= 0)
5014 Mask1[2] += 4;
5015 if (Mask1[3] >= 0)
5016 Mask1[3] += 4;
5017 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
5018 }
5019 }
5020
5021 // Break it into (shuffle shuffle_hi, shuffle_lo).
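// Illustrative example: for the all-V1 mask <2,3,0,1>, the loop below builds
// LoMask = <2,3,u,u> and HiMask = <0,1,u,u>, and the final shuffle combines
// LoShuffle and HiShuffle with the mask <0,1,4,5>.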
5022 Locs.clear();
5023 SmallVector<int,8> LoMask(4U, -1);
5024 SmallVector<int,8> HiMask(4U, -1);
5025
5026 SmallVector<int,8> *MaskPtr = &LoMask;
5027 unsigned MaskIdx = 0;
5028 unsigned LoIdx = 0;
5029 unsigned HiIdx = 2;
5030 for (unsigned i = 0; i != 4; ++i) {
5031 if (i == 2) {
5032 MaskPtr = &HiMask;
5033 MaskIdx = 1;
5034 LoIdx = 0;
5035 HiIdx = 2;
5036 }
5037 int Idx = PermMask[i];
5038 if (Idx < 0) {
5039 Locs[i] = std::make_pair(-1, -1);
5040 } else if (Idx < 4) {
5041 Locs[i] = std::make_pair(MaskIdx, LoIdx);
5042 (*MaskPtr)[LoIdx] = Idx;
5043 LoIdx++;
5044 } else {
5045 Locs[i] = std::make_pair(MaskIdx, HiIdx);
5046 (*MaskPtr)[HiIdx] = Idx;
5047 HiIdx++;
5048 }
5049 }
5050
5051 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
5052 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
5053 SmallVector<int, 8> MaskOps;
5054 for (unsigned i = 0; i != 4; ++i) {
5055 if (Locs[i].first == -1) {
5056 MaskOps.push_back(-1);
5057 } else {
5058 unsigned Idx = Locs[i].first * 4 + Locs[i].second;
5059 MaskOps.push_back(Idx);
5060 }
5061 }
5062 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
5063 }
5064
5065 static bool MayFoldVectorLoad(SDValue V) {
5066 if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
5067 V = V.getOperand(0);
5068 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5069 V = V.getOperand(0);
5070 if (MayFoldLoad(V))
5071 return true;
5072 return false;
5073 }
5074
5075 // FIXME: the version above should always be used. Since there's
5076 // a bug where several vector shuffles can't be folded because the
5077 // DAG is not updated during lowering and a node claims to have two
5078 // uses while it only has one, use this version, and let isel match
5079 // another instruction if the load really happens to have more than
5080 // one use. Remove this version once this bug gets fixed.
5081 static bool RelaxedMayFoldVectorLoad(SDValue V) {
5082 if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
5083 V = V.getOperand(0);
5084 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5085 V = V.getOperand(0);
5086 if (ISD::isNormalLoad(V.getNode()))
5087 return true;
5088 return false;
5089 }
5090
5091 /// CanXFormVExtractWithShuffleIntoLoad - Check if the current shuffle is used
5092 /// by a vector extract, and if both can be later optimized into a single load.
5093 /// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked
5094 /// here because otherwise a target specific shuffle node would be
5095 /// emitted for this shuffle, and the optimization would not be done.
5096 /// FIXME: This is probably not the best approach, but it fixes the problem
5097 /// until the right path is decided.
5098 static
5099 bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
5100 const TargetLowering &TLI) {
5101 EVT VT = V.getValueType();
5102 ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);
5103
5104 // Be sure that the vector shuffle is present in a pattern like this:
5105 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
5106 if (!V.hasOneUse())
5107 return false;
5108
5109 SDNode *N = *V.getNode()->use_begin();
5110 if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
5111 return false;
5112
5113 SDValue EltNo = N->getOperand(1);
5114 if (!isa<ConstantSDNode>(EltNo))
5115 return false;
5116
5117 // If the bit convert changed the number of elements, it is unsafe
5118 // to examine the mask.
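// For instance (illustrative), extracting element 1 of
//   (v4i32 (bit_convert (v8i16 shuffle ...)))
// covers two i16 shuffle lanes at once, so no single mask entry identifies
// the source element and the transformation must be rejected.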
5119 bool HasShuffleIntoBitcast = false;
5120 if (V.getOpcode() == ISD::BIT_CONVERT) {
5121 EVT SrcVT = V.getOperand(0).getValueType();
5122 if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
5123 return false;
5124 V = V.getOperand(0);
5125 HasShuffleIntoBitcast = true;
5126 }
5127
5128 // Select the input vector, guarding against an out of range extract index.
5129 unsigned NumElems = VT.getVectorNumElements();
5130 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5131 int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
5132 V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
5133
5134 // Skip one more bit_convert if necessary
5135 if (V.getOpcode() == ISD::BIT_CONVERT)
5136 V = V.getOperand(0);
5137
5138 if (ISD::isNormalLoad(V.getNode())) {
5139 // Is the original load suitable?
5140 LoadSDNode *LN0 = cast<LoadSDNode>(V);
5141
5142 // FIXME: avoid the multi-use bug that is preventing lots of
5143 // foldings from being detected; this is still wrong, of course,
5144 // but it gives the temporarily desired behavior. If the load
5145 // really has more uses, isel will not fold it and will
5146 // generate poor code.
5147 if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
5148 return false;
5149
5150 if (!HasShuffleIntoBitcast)
5151 return true;
5152
5153 // If there's a bitcast before the shuffle, check if the load type and
5154 // alignment are valid.
5155 unsigned Align = LN0->getAlignment();
5156 unsigned NewAlign =
5157 TLI.getTargetData()->getABITypeAlignment(
5158 VT.getTypeForEVT(*DAG.getContext()));
5159
5160 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
5161 return false;
5162 }
5163
5164 return true;
5165 }
5166
5167 static
5168 SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
5169 bool HasSSE2) {
5170 SDValue V1 = Op.getOperand(0);
5171 SDValue V2 = Op.getOperand(1);
5172 EVT VT = Op.getValueType();
5173
5174 assert(VT != MVT::v2i64 && "unsupported shuffle type");
5175
5176 if (HasSSE2 && VT == MVT::v2f64)
5177 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
5178
5179 // v4f32 or v4i32
5180 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG);
5181 }
5182
5183 static
5184 SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
5185 SDValue V1 = Op.getOperand(0);
5186 SDValue V2 = Op.getOperand(1);
5187 EVT VT = Op.getValueType();
5188
5189 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
5190 "unsupported shuffle type");
5191
5192 if (V2.getOpcode() == ISD::UNDEF)
5193 V2 = V1;
5194
5195 // v4i32 or v4f32
5196 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
5197 }
5198
5199 static
5200 SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
5201 SDValue V1 = Op.getOperand(0);
5202 SDValue V2 = Op.getOperand(1);
5203 EVT VT = Op.getValueType();
5204 unsigned NumElems = VT.getVectorNumElements();
5205
5206 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
5207 // operand of these instructions is only memory, so check if there's a
5208 // potential load folding here; otherwise use SHUFPS or MOVSD to match the
5209 // same masks.
5210 bool CanFoldLoad = false;
5211
5212 // Trivial case, when V2 comes from a load.
5213 if (MayFoldVectorLoad(V2))
5214 CanFoldLoad = true;
5215
5216 // When V1 is a load, it can be folded later into a store in isel, example:
5217 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
5218 // turns into:
5219 // (MOVLPSmr addr:$src1, VR128:$src2)
5220 // So, recognize this potential and also use MOVLPS or MOVLPD
5221 if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
5222 CanFoldLoad = true;
5223
5224 if (CanFoldLoad) {
5225 if (HasSSE2 && NumElems == 2)
5226 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
5227
5228 if (NumElems == 4)
5229 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
5230 }
5231
5232 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5233 // movl and movlp will both match v2i64, but v2i64 is never matched by
5234 // movl earlier because we make it strict to avoid messing with the movlp load
5235 // folding logic (see the code above the getMOVLP call). Match it here then;
5236 // this is horrible, but it will stay like this until we move all shuffle
5237 // matching to x86-specific nodes. Note that for the 1st condition all
5238 // types are matched with movsd.
5239 if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp))
5240 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
5241 else if (HasSSE2)
5242 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
5243
5244
5245 assert(VT != MVT::v4i32 && "unsupported shuffle type");
5246
5247 // Invert the operand order and use SHUFPS to match it.
5248 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1,
5249 X86::getShuffleSHUFImmediate(SVOp), DAG);
5250 }
5251
5252 static inline unsigned getUNPCKLOpcode(EVT VT) {
5253 switch(VT.getSimpleVT().SimpleTy) {
5254 case MVT::v4i32: return X86ISD::PUNPCKLDQ;
5255 case MVT::v2i64: return X86ISD::PUNPCKLQDQ;
5256 case MVT::v4f32: return X86ISD::UNPCKLPS;
5257 case MVT::v2f64: return X86ISD::UNPCKLPD;
5258 case MVT::v16i8: return X86ISD::PUNPCKLBW;
5259 case MVT::v8i16: return X86ISD::PUNPCKLWD;
5260 default:
5261 llvm_unreachable("Unknown type for unpckl");
5262 }
5263 return 0;
5264 }
5265
5266 static inline unsigned getUNPCKHOpcode(EVT VT) {
5267 switch(VT.getSimpleVT().SimpleTy) {
5268 case MVT::v4i32: return X86ISD::PUNPCKHDQ;
5269 case MVT::v2i64: return X86ISD::PUNPCKHQDQ;
5270 case MVT::v4f32: return X86ISD::UNPCKHPS;
5271 case MVT::v2f64: return X86ISD::UNPCKHPD;
5272 case MVT::v16i8: return X86ISD::PUNPCKHBW;
5273 case MVT::v8i16: return X86ISD::PUNPCKHWD;
5274 default:
5275 llvm_unreachable("Unknown type for unpckh");
5276 }
5277 return 0;
5278 }
5279
5280 static
5281 SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
5282 const TargetLowering &TLI,
5283 const X86Subtarget *Subtarget) {
5284 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5285 EVT VT = Op.getValueType();
5286 DebugLoc dl = Op.getDebugLoc();
5287 SDValue V1 = Op.getOperand(0);
5288 SDValue V2 = Op.getOperand(1);
5289
5290 if (isZeroShuffle(SVOp))
5291 return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
5292
5293 // Handle splat operations
5294 if (SVOp->isSplat()) {
5295 // Special case, this is the only place now where it's
5296 // allowed to return a vector_shuffle operation without
5297 // using a target specific node, because *hopefully* it
5298 // will be optimized away by the DAG combiner.
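// Illustrative pattern (mirroring the one documented in
// CanXFormVExtractWithShuffleIntoLoad):
//   (f32 (vextract (v4f32 (shuffle (load $addr), <0,u,u,u>)), 0))
// can later be combined into a plain (f32 load $addr), but only while the
// splat is still a generic vector_shuffle.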
5299 if (VT.getVectorNumElements() <= 4 &&
5300 CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
5301 return Op;
5302
5303 // Handle splats by matching through known masks
5304 if (VT.getVectorNumElements() <= 4)
5305 return SDValue();
5306
5307 // Canonicalize all of the remaining ones to v4f32.
5308 return PromoteSplat(SVOp, DAG);
5309 }
5310
5311 // If the shuffle can be profitably rewritten as a narrower shuffle, then
5312 // do it!
5313 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
5314 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5315 if (NewOp.getNode())
5316 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, NewOp);
5317 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
5318 // FIXME: Figure out a cleaner way to do this.
5319 // Try to make use of movq to zero out the top part.
5320 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
5321 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5322 if (NewOp.getNode()) {
5323 if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
5324 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
5325 DAG, Subtarget, dl);
5326 }
5327 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
5328 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5329 if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
5330 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
5331 DAG, Subtarget, dl);
5332 }
5333 }
5334 return SDValue();
5335 }
5336
5337 SDValue
5338 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
5339 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5340 SDValue V1 = Op.getOperand(0);
5341 SDValue V2 = Op.getOperand(1);
5342 EVT VT = Op.getValueType();
5343 DebugLoc dl = Op.getDebugLoc();
5344 unsigned NumElems = VT.getVectorNumElements();
5345 bool isMMX = VT.getSizeInBits() == 64;
5346 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
5347 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
5348 bool V1IsSplat = false;
5349 bool V2IsSplat = false;
5350 bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
5351 bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
5352 bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
5353 MachineFunction &MF = DAG.getMachineFunction();
5354 bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
5355
5356 // Shuffle operations on MMX are not supported.
5357 if (isMMX)
5358 return Op;
5359
5360 // Vector shuffle lowering takes 3 steps:
5361 //
5362 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
5363 // narrowing and commutation of operands should be handled.
5364 // 2) Matching of shuffles with known shuffle masks to x86 target specific
5365 // shuffle nodes.
5366 // 3) Rewriting of unmatched masks into new generic shuffle operations,
5367 // so the shuffle can be broken into other shuffles and the legalizer can
5368 // try the lowering again.
5369 //
5370 // The general idea is that no vector_shuffle operation should be left to
5371 // be matched during isel; all of them must be converted to a target-specific
5372 // node here.
5373
5374 // Normalize the input vectors. Here splats, zeroed vectors, profitable
5375 // narrowing and commutation of operands should be handled. The actual code
5376 // doesn't include all of those; work in progress...
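// Example of step 1 (illustrative): the v8i16 mask <0,1,2,3,8,9,10,11> is
// rewritten by RewriteAsNarrowerShuffle into the v4i32 mask <0,1,4,5>, which
// is much easier to match against the x86 shuffle nodes when lowering is
// retried.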
5377 SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
5378 if (NewOp.getNode())
5379 return NewOp;
5380
5381 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
5382 // unpckh_undef). Only use pshufd if speed is more important than size.
5383 if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
5384 if (VT != MVT::v2i64 && VT != MVT::v2f64)
5385 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
5386 if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
5387 if (VT != MVT::v2i64 && VT != MVT::v2f64)
5388 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
5389
5390 if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
5391 RelaxedMayFoldVectorLoad(V1))
5392 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
5393
5394 if (X86::isMOVHLPS_v_undef_Mask(SVOp))
5395 return getMOVHighToLow(Op, dl, DAG);
5396
5397 // Used to match splats
5398 if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
5399 (VT == MVT::v2f64 || VT == MVT::v2i64))
5400 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
5401
5402 if (X86::isPSHUFDMask(SVOp)) {
5403 // The mask was matched by the if above, but during isel this can match
5404 // several different instructions, not only pshufd as its name says -
5405 // sad but true. Emulate the behavior for now...
5406 if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
5407 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
5408
5409 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
5410
5411 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
5412 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
5413
5414 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
5415 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1,
5416 TargetMask, DAG);
5417
5418 if (VT == MVT::v4f32)
5419 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1,
5420 TargetMask, DAG);
5421 }
5422
5423 // Check if this can be converted into a logical shift.
5424 bool isLeft = false;
5425 unsigned ShAmt = 0;
5426 SDValue ShVal;
5427 bool isShift = getSubtarget()->hasSSE2() &&
5428 isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
5429 if (isShift && ShVal.hasOneUse()) {
5430 // If the shifted value has multiple uses, it may be cheaper to use
5431 // v_set0 + movlhps or movhlps, etc.
5432 EVT EltVT = VT.getVectorElementType();
5433 ShAmt *= EltVT.getSizeInBits();
5434 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
5435 }
5436
5437 if (X86::isMOVLMask(SVOp)) {
5438 if (V1IsUndef)
5439 return V2;
5440 if (ISD::isBuildVectorAllZeros(V1.getNode()))
5441 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
5442 if (!X86::isMOVLPMask(SVOp)) {
5443 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
5444 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
5445
5446 if (VT == MVT::v4i32 || VT == MVT::v4f32)
5447 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
5448 }
5449 }
5450
5451 // FIXME: fold these into a legal mask.
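// For reference (illustrative): on v4f32, MOVLHPS corresponds to the shuffle
// mask <0,1,4,5> and MOVHLPS to <6,7,2,3>.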
5452 if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
5453 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
5454
5455 if (X86::isMOVHLPSMask(SVOp))
5456 return getMOVHighToLow(Op, dl, DAG);
5457
5458 if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
5459 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
5460
5461 if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
5462 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
5463
5464 if (X86::isMOVLPMask(SVOp))
5465 return getMOVLP(Op, dl, DAG, HasSSE2);
5466
5467 if (ShouldXformToMOVHLPS(SVOp) ||
5468 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
5469 return CommuteVectorShuffle(SVOp, DAG);
5470
5471 if (isShift) {
5472 // No better options. Use a vshl / vsrl.
5473 EVT EltVT = VT.getVectorElementType();
5474 ShAmt *= EltVT.getSizeInBits();
5475 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
5476 }
5477
5478 bool Commuted = false;
5479 // FIXME: This should also accept a bitcast of a splat? Be careful, not
5480 // 1,1,1,1 -> v8i16 though.
5481 V1IsSplat = isSplatVector(V1.getNode());
5482 V2IsSplat = isSplatVector(V2.getNode());
5483
5484 // Canonicalize the splat or undef, if present, to be on the RHS.
5485 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
5486 Op = CommuteVectorShuffle(SVOp, DAG);
5487 SVOp = cast<ShuffleVectorSDNode>(Op);
5488 V1 = SVOp->getOperand(0);
5489 V2 = SVOp->getOperand(1);
5490 std::swap(V1IsSplat, V2IsSplat);
5491 std::swap(V1IsUndef, V2IsUndef);
5492 Commuted = true;
5493 }
5494
5495 if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
5496 // Shuffling the low element of V1 into undef; just return V1.
5497 if (V2IsUndef)
5498 return V1;
5499 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
5500 // the instruction selector will not match, so get a canonical MOVL with
5501 // swapped operands to undo the commute.
5502 return getMOVL(DAG, dl, VT, V2, V1);
5503 }
5504
5505 if (X86::isUNPCKLMask(SVOp))
5506 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
5507
5508 if (X86::isUNPCKHMask(SVOp))
5509 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
5510
5511 if (V2IsSplat) {
5512 // Normalize the mask so all entries that point to V2 point to its first
5513 // element, then try to match unpck{h|l} again. If a match is found,
5514 // return a new vector_shuffle with the corrected mask.
5515 SDValue NewMask = NormalizeMask(SVOp, DAG);
5516 ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
5517 if (NSVOp != SVOp) {
5518 if (X86::isUNPCKLMask(NSVOp, true)) {
5519 return NewMask;
5520 } else if (X86::isUNPCKHMask(NSVOp, true)) {
5521 return NewMask;
5522 }
5523 }
5524 }
5525
5526 if (Commuted) {
5527 // Commute it back and try unpck* again.
5528 // FIXME: this seems wrong.
5529 SDValue NewOp = CommuteVectorShuffle(SVOp, DAG); 5530 ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); 5531 5532 if (X86::isUNPCKLMask(NewSVOp)) 5533 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG); 5534 5535 if (X86::isUNPCKHMask(NewSVOp)) 5536 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG); 5537 } 5538 5539 // Normalize the node to match x86 shuffle ops if needed 5540 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp)) 5541 return CommuteVectorShuffle(SVOp, DAG); 5542 5543 // The checks below are all present in isShuffleMaskLegal, but they are 5544 // inlined here right now to enable us to directly emit target specific 5545 // nodes, and remove one by one until they don't return Op anymore. 5546 SmallVector<int, 16> M; 5547 SVOp->getMask(M); 5548 5549 if (isPALIGNRMask(M, VT, HasSSSE3)) 5550 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 5551 X86::getShufflePALIGNRImmediate(SVOp), 5552 DAG); 5553 5554 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 5555 SVOp->getSplatIndex() == 0 && V2IsUndef) { 5556 if (VT == MVT::v2f64) 5557 return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG); 5558 if (VT == MVT::v2i64) 5559 return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG); 5560 } 5561 5562 if (isPSHUFHWMask(M, VT)) 5563 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 5564 X86::getShufflePSHUFHWImmediate(SVOp), 5565 DAG); 5566 5567 if (isPSHUFLWMask(M, VT)) 5568 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 5569 X86::getShufflePSHUFLWImmediate(SVOp), 5570 DAG); 5571 5572 if (isSHUFPMask(M, VT)) { 5573 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); 5574 if (VT == MVT::v4f32 || VT == MVT::v4i32) 5575 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2, 5576 TargetMask, DAG); 5577 if (VT == MVT::v2f64 || VT == MVT::v2i64) 5578 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2, 5579 TargetMask, DAG); 5580 } 5581 5582 if (X86::isUNPCKL_v_undef_Mask(SVOp)) 5583 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5584 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG); 5585 if (X86::isUNPCKH_v_undef_Mask(SVOp)) 5586 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5587 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); 5588 5589 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 5590 if (VT == MVT::v8i16) { 5591 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); 5592 if (NewOp.getNode()) 5593 return NewOp; 5594 } 5595 5596 if (VT == MVT::v16i8) { 5597 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 5598 if (NewOp.getNode()) 5599 return NewOp; 5600 } 5601 5602 // Handle all 4 wide cases with a number of shuffles. 5603 if (NumElems == 4) 5604 return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG); 5605 5606 return SDValue(); 5607} 5608 5609SDValue 5610X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 5611 SelectionDAG &DAG) const { 5612 EVT VT = Op.getValueType(); 5613 DebugLoc dl = Op.getDebugLoc(); 5614 if (VT.getSizeInBits() == 8) { 5615 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 5616 Op.getOperand(0), Op.getOperand(1)); 5617 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 5618 DAG.getValueType(VT)); 5619 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 5620 } else if (VT.getSizeInBits() == 16) { 5621 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5622 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 
5623 if (Idx == 0)
5624 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
5625 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
5626 DAG.getNode(ISD::BIT_CONVERT, dl,
5627 MVT::v4i32,
5628 Op.getOperand(0)),
5629 Op.getOperand(1)));
5630 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
5631 Op.getOperand(0), Op.getOperand(1));
5632 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
5633 DAG.getValueType(VT));
5634 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
5635 } else if (VT == MVT::f32) {
5636 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
5637 // the result back to an FR32 register. It's only worth matching if the
5638 // result has a single use which is a store or a bitcast to i32. And in
5639 // the case of a store, it's not worth it if the index is a constant 0,
5640 // because a MOVSSmr can be used instead, which is smaller and faster.
5641 if (!Op.hasOneUse())
5642 return SDValue();
5643 SDNode *User = *Op.getNode()->use_begin();
5644 if ((User->getOpcode() != ISD::STORE ||
5645 (isa<ConstantSDNode>(Op.getOperand(1)) &&
5646 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
5647 (User->getOpcode() != ISD::BIT_CONVERT ||
5648 User->getValueType(0) != MVT::i32))
5649 return SDValue();
5650 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
5651 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
5652 Op.getOperand(0)),
5653 Op.getOperand(1));
5654 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
5655 } else if (VT == MVT::i32) {
5656 // EXTRACTPS works with a constant index.
5657 if (isa<ConstantSDNode>(Op.getOperand(1)))
5658 return Op;
5659 }
5660 return SDValue();
5661 }
5662
5663
5664 SDValue
5665 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
5666 SelectionDAG &DAG) const {
5667 if (!isa<ConstantSDNode>(Op.getOperand(1)))
5668 return SDValue();
5669
5670 if (Subtarget->hasSSE41()) {
5671 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
5672 if (Res.getNode())
5673 return Res;
5674 }
5675
5676 EVT VT = Op.getValueType();
5677 DebugLoc dl = Op.getDebugLoc();
5678 // TODO: handle v16i8.
5679 if (VT.getSizeInBits() == 16) {
5680 SDValue Vec = Op.getOperand(0);
5681 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
5682 if (Idx == 0)
5683 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
5684 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
5685 DAG.getNode(ISD::BIT_CONVERT, dl,
5686 MVT::v4i32, Vec),
5687 Op.getOperand(1)));
5688 // Transform it so it matches pextrw, which produces a 32-bit result.
5689 EVT EltVT = MVT::i32;
5690 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
5691 Op.getOperand(0), Op.getOperand(1));
5692 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
5693 DAG.getValueType(VT));
5694 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
5695 } else if (VT.getSizeInBits() == 32) {
5696 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
5697 if (Idx == 0)
5698 return Op;
5699
5700 // SHUFPS the element to the lowest double word, then movss.
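// e.g. (illustrative) extracting element 2 of a v4f32: the mask <2,u,u,u>
// built below moves the element into lane 0, and extracting lane 0 is then a
// simple scalar move.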
5701 int Mask[4] = { Idx, -1, -1, -1 };
5702 EVT VVT = Op.getOperand(0).getValueType();
5703 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
5704 DAG.getUNDEF(VVT), Mask);
5705 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
5706 DAG.getIntPtrConstant(0));
5707 } else if (VT.getSizeInBits() == 64) {
5708 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
5709 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
5710 // to match extract_elt for f64.
5711 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
5712 if (Idx == 0)
5713 return Op;
5714
5715 // UNPCKHPD the element to the lowest double word, then movsd.
5716 // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
5717 // to an f64mem, the whole operation is folded into a single MOVHPDmr.
5718 int Mask[2] = { 1, -1 };
5719 EVT VVT = Op.getOperand(0).getValueType();
5720 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
5721 DAG.getUNDEF(VVT), Mask);
5722 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
5723 DAG.getIntPtrConstant(0));
5724 }
5725
5726 return SDValue();
5727 }
5728
5729 SDValue
5730 X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
5731 SelectionDAG &DAG) const {
5732 EVT VT = Op.getValueType();
5733 EVT EltVT = VT.getVectorElementType();
5734 DebugLoc dl = Op.getDebugLoc();
5735
5736 SDValue N0 = Op.getOperand(0);
5737 SDValue N1 = Op.getOperand(1);
5738 SDValue N2 = Op.getOperand(2);
5739
5740 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
5741 isa<ConstantSDNode>(N2)) {
5742 unsigned Opc;
5743 if (VT == MVT::v8i16)
5744 Opc = X86ISD::PINSRW;
5745 else if (VT == MVT::v16i8)
5746 Opc = X86ISD::PINSRB;
5747 else
5748 Opc = X86ISD::PINSRB;
5749
5750 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
5751 // argument.
5752 if (N1.getValueType() != MVT::i32)
5753 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
5754 if (N2.getValueType() != MVT::i32)
5755 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
5756 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
5757 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
5758 // Bits [7:6] of the constant are the source select. This will always be
5759 // zero here. The DAG Combiner may combine an extract_elt index into these
5760 // bits. For example (insert (extract, 3), 2) could be matched by putting
5761 // the '3' into bits [7:6] of X86ISD::INSERTPS.
5762 // Bits [5:4] of the constant are the destination select. This is the
5763 // value of the incoming immediate.
5764 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
5765 // combine either bitwise AND or insert of float 0.0 to set these bits.
5766 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
5767 // Create this as a scalar to vector.
5768 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
5769 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
5770 } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
5771 // PINSR* works with a constant index.
5772 return Op;
5773 }
5774 return SDValue();
5775 }
5776
5777 SDValue
5778 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
5779 EVT VT = Op.getValueType();
5780 EVT EltVT = VT.getVectorElementType();
5781
5782 if (Subtarget->hasSSE41())
5783 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
5784
5785 if (EltVT == MVT::i8)
5786 return SDValue();
5787
5788 DebugLoc dl = Op.getDebugLoc();
5789 SDValue N0 = Op.getOperand(0);
5790 SDValue N1 = Op.getOperand(1);
5791 SDValue N2 = Op.getOperand(2);
5792
5793 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
5794 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
5795 // as its second argument.
5796 if (N1.getValueType() != MVT::i32)
5797 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
5798 if (N2.getValueType() != MVT::i32)
5799 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
5800 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
5801 }
5802 return SDValue();
5803 }
5804
5805 SDValue
5806 X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
5807 DebugLoc dl = Op.getDebugLoc();
5808
5809 if (Op.getValueType() == MVT::v1i64 &&
5810 Op.getOperand(0).getValueType() == MVT::i64)
5811 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
5812
5813 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
5814 assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
5815 "Expected an SSE type!");
5816 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
5817 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
5818 }
5819
5820 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
5821 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
5822 // one of the above mentioned nodes. It has to be wrapped because otherwise
5823 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
5824 // be used to form addressing modes. These wrapped nodes will be selected
5825 // into MOV32ri.
5826 SDValue
5827 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
5828 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
5829
5830 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
5831 // global base reg.
5832 unsigned char OpFlag = 0;
5833 unsigned WrapperKind = X86ISD::Wrapper;
5834 CodeModel::Model M = getTargetMachine().getCodeModel();
5835
5836 if (Subtarget->isPICStyleRIPRel() &&
5837 (M == CodeModel::Small || M == CodeModel::Kernel))
5838 WrapperKind = X86ISD::WrapperRIP;
5839 else if (Subtarget->isPICStyleGOT())
5840 OpFlag = X86II::MO_GOTOFF;
5841 else if (Subtarget->isPICStyleStubPIC())
5842 OpFlag = X86II::MO_PIC_BASE_OFFSET;
5843
5844 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
5845 CP->getAlignment(),
5846 CP->getOffset(), OpFlag);
5847 DebugLoc DL = CP->getDebugLoc();
5848 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
5849 // With PIC, the address is actually $g + Offset.
5850 if (OpFlag) {
5851 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
5852 DAG.getNode(X86ISD::GlobalBaseReg,
5853 DebugLoc(), getPointerTy()),
5854 Result);
5855 }
5856
5857 return Result;
5858 }
5859
5860 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
5861 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
5862
5863 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
5864 // global base reg.
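// Illustrative result (assuming 32-bit GOT-style PIC): the jump table address
// below becomes (add GlobalBaseReg, (Wrapper JT@GOTOFF)), i.e. the picbase
// register plus a @GOTOFF-relative symbol.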
5865 unsigned char OpFlag = 0;
5866 unsigned WrapperKind = X86ISD::Wrapper;
5867 CodeModel::Model M = getTargetMachine().getCodeModel();
5868
5869 if (Subtarget->isPICStyleRIPRel() &&
5870 (M == CodeModel::Small || M == CodeModel::Kernel))
5871 WrapperKind = X86ISD::WrapperRIP;
5872 else if (Subtarget->isPICStyleGOT())
5873 OpFlag = X86II::MO_GOTOFF;
5874 else if (Subtarget->isPICStyleStubPIC())
5875 OpFlag = X86II::MO_PIC_BASE_OFFSET;
5876
5877 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
5878 OpFlag);
5879 DebugLoc DL = JT->getDebugLoc();
5880 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
5881
5882 // With PIC, the address is actually $g + Offset.
5883 if (OpFlag) {
5884 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
5885 DAG.getNode(X86ISD::GlobalBaseReg,
5886 DebugLoc(), getPointerTy()),
5887 Result);
5888 }
5889
5890 return Result;
5891 }
5892
5893 SDValue
5894 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
5895 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
5896
5897 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
5898 // global base reg.
5899 unsigned char OpFlag = 0;
5900 unsigned WrapperKind = X86ISD::Wrapper;
5901 CodeModel::Model M = getTargetMachine().getCodeModel();
5902
5903 if (Subtarget->isPICStyleRIPRel() &&
5904 (M == CodeModel::Small || M == CodeModel::Kernel))
5905 WrapperKind = X86ISD::WrapperRIP;
5906 else if (Subtarget->isPICStyleGOT())
5907 OpFlag = X86II::MO_GOTOFF;
5908 else if (Subtarget->isPICStyleStubPIC())
5909 OpFlag = X86II::MO_PIC_BASE_OFFSET;
5910
5911 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
5912
5913 DebugLoc DL = Op.getDebugLoc();
5914 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
5915
5916
5917 // With PIC, the address is actually $g + Offset.
5918 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
5919 !Subtarget->is64Bit()) {
5920 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
5921 DAG.getNode(X86ISD::GlobalBaseReg,
5922 DebugLoc(), getPointerTy()),
5923 Result);
5924 }
5925
5926 return Result;
5927 }
5928
5929 SDValue
5930 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
5931 // Create the TargetBlockAddress node.
5932 unsigned char OpFlags =
5933 Subtarget->ClassifyBlockAddressReference();
5934 CodeModel::Model M = getTargetMachine().getCodeModel();
5935 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
5936 DebugLoc dl = Op.getDebugLoc();
5937 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
5938 /*isTarget=*/true, OpFlags);
5939
5940 if (Subtarget->isPICStyleRIPRel() &&
5941 (M == CodeModel::Small || M == CodeModel::Kernel))
5942 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
5943 else
5944 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
5945
5946 // With PIC, the address is actually $g + Offset.
5947 if (isGlobalRelativeToPICBase(OpFlags)) {
5948 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
5949 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
5950 Result);
5951 }
5952
5953 return Result;
5954 }
5955
5956 SDValue
5957 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
5958 int64_t Offset,
5959 SelectionDAG &DAG) const {
5960 // Create the TargetGlobalAddress node, folding in the constant
5961 // offset if it is legal.
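// e.g. (illustrative) a direct reference to &g + 16 whose offset is
// acceptable to the code model folds into a single TargetGlobalAddress(g, 16);
// for a stub-indirect reference the offset is not folded here and is re-added
// explicitly after the load from the stub below.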
5962 unsigned char OpFlags = 5963 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 5964 CodeModel::Model M = getTargetMachine().getCodeModel(); 5965 SDValue Result; 5966 if (OpFlags == X86II::MO_NO_FLAG && 5967 X86::isOffsetSuitableForCodeModel(Offset, M)) { 5968 // A direct static reference to a global. 5969 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 5970 Offset = 0; 5971 } else { 5972 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 5973 } 5974 5975 if (Subtarget->isPICStyleRIPRel() && 5976 (M == CodeModel::Small || M == CodeModel::Kernel)) 5977 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 5978 else 5979 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 5980 5981 // With PIC, the address is actually $g + Offset. 5982 if (isGlobalRelativeToPICBase(OpFlags)) { 5983 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 5984 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 5985 Result); 5986 } 5987 5988 // For globals that require a load from a stub to get the address, emit the 5989 // load. 5990 if (isGlobalStubReference(OpFlags)) 5991 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 5992 MachinePointerInfo::getGOT(), false, false, 0); 5993 5994 // If there was a non-zero offset that we didn't fold, create an explicit 5995 // addition for it. 5996 if (Offset != 0) 5997 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 5998 DAG.getConstant(Offset, getPointerTy())); 5999 6000 return Result; 6001} 6002 6003SDValue 6004X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 6005 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 6006 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 6007 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 6008} 6009 6010static SDValue 6011GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 6012 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 6013 unsigned char OperandFlags) { 6014 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6015 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 6016 DebugLoc dl = GA->getDebugLoc(); 6017 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 6018 GA->getValueType(0), 6019 GA->getOffset(), 6020 OperandFlags); 6021 if (InFlag) { 6022 SDValue Ops[] = { Chain, TGA, *InFlag }; 6023 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3); 6024 } else { 6025 SDValue Ops[] = { Chain, TGA }; 6026 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2); 6027 } 6028 6029 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 6030 MFI->setAdjustsStack(true); 6031 6032 SDValue Flag = Chain.getValue(1); 6033 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 6034} 6035 6036// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 6037static SDValue 6038LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6039 const EVT PtrVT) { 6040 SDValue InFlag; 6041 DebugLoc dl = GA->getDebugLoc(); // ? 
// function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               DebugLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr), false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // initial exec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (is64Bit) {
    assert(model == TLSModel::InitialExec);
    OperandFlags = X86II::MO_GOTTPOFF;
    WrapperKind = X86ISD::WrapperRIP;
  } else {
    assert(model == TLSModel::InitialExec);
    OperandFlags = X86II::MO_INDNTPOFF;
  }

  // Emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax"
  // (initial exec).
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec)
    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  if (Subtarget->isTargetELF()) {
    // TODO: implement the "local dynamic" model
    // TODO: implement the "initial exec" model for pic executables
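
    // For reference, a hedged sketch of the instruction sequences the TLS
    // helpers above aim to produce. Registers and relocation spellings are
    // illustrative, not authoritative:
    //
    //   general dynamic, 32-bit:  leal  x@TLSGD(,%ebx,1), %eax
    //                             call  ___tls_get_addr@PLT   (result: %eax)
    //   general dynamic, 64-bit:  leaq  x@TLSGD(%rip), %rdi
    //                             call  __tls_get_addr@PLT    (result: %rax)
    //   initial exec,    64-bit:  movq  %fs:0, %rax
    //                             addq  x@GOTTPOFF(%rip), %rax
    //   local exec,      32-bit:  movl  %gs:0, %eax
    //                             addl  x@ntpoff, %eax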

    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);

    TLSModel::Model model
      = getTLSModel(GV, getTargetMachine().getRelocationModel());

    switch (model) {
      case TLSModel::GeneralDynamic:
      case TLSModel::LocalDynamic: // not implemented
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
        return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());

      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                                   Subtarget->is64Bit());
    }
  } else if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    DebugLoc DL = Op.getDebugLoc();
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                getPointerTy(),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       DebugLoc(), getPointerTy()),
                           Offset);

    // Lowering the machine ISD will make sure everything is in the right
    // location.
    SDValue Args[] = { Offset };
    SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1);

    // TLSCALL will be codegen'ed as a call. Inform MFI that the function has
    // calls.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
  }

  llvm_unreachable("TLS not implemented for this target.");
  return SDValue();
}


/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, MVT::i8))
                       : DAG.getConstant(0, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 6206 } 6207 6208 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 6209 DAG.getConstant(VTBits, MVT::i8)); 6210 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 6211 AndNode, DAG.getConstant(0, MVT::i8)); 6212 6213 SDValue Hi, Lo; 6214 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 6215 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 6216 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 6217 6218 if (Op.getOpcode() == ISD::SHL_PARTS) { 6219 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6220 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6221 } else { 6222 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6223 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6224 } 6225 6226 SDValue Ops[2] = { Lo, Hi }; 6227 return DAG.getMergeValues(Ops, 2, dl); 6228} 6229 6230SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 6231 SelectionDAG &DAG) const { 6232 EVT SrcVT = Op.getOperand(0).getValueType(); 6233 6234 if (SrcVT.isVector()) 6235 return SDValue(); 6236 6237 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 6238 "Unknown SINT_TO_FP to lower!"); 6239 6240 // These are really Legal; return the operand so the caller accepts it as 6241 // Legal. 6242 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 6243 return Op; 6244 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 6245 Subtarget->is64Bit()) { 6246 return Op; 6247 } 6248 6249 DebugLoc dl = Op.getDebugLoc(); 6250 unsigned Size = SrcVT.getSizeInBits()/8; 6251 MachineFunction &MF = DAG.getMachineFunction(); 6252 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 6253 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6254 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6255 StackSlot, 6256 MachinePointerInfo::getFixedStack(SSFI), 6257 false, false, 0); 6258 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 6259} 6260 6261SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 6262 SDValue StackSlot, 6263 SelectionDAG &DAG) const { 6264 // Build the FILD 6265 DebugLoc DL = Op.getDebugLoc(); 6266 SDVTList Tys; 6267 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 6268 if (useSSE) 6269 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 6270 else 6271 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 6272 6273 unsigned ByteSize = SrcVT.getSizeInBits()/8; 6274 6275 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 6276 MachineMemOperand *MMO = 6277 DAG.getMachineFunction() 6278 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6279 MachineMemOperand::MOLoad, ByteSize, ByteSize); 6280 6281 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 6282 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 6283 X86ISD::FILD, DL, 6284 Tys, Ops, array_lengthof(Ops), 6285 SrcVT, MMO); 6286 6287 if (useSSE) { 6288 Chain = Result.getValue(1); 6289 SDValue InFlag = Result.getValue(2); 6290 6291 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 6292 // shouldn't be necessary except that RFP cannot be live across 6293 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
6294 MachineFunction &MF = DAG.getMachineFunction(); 6295 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 6296 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 6297 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6298 Tys = DAG.getVTList(MVT::Other); 6299 SDValue Ops[] = { 6300 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 6301 }; 6302 MachineMemOperand *MMO = 6303 DAG.getMachineFunction() 6304 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6305 MachineMemOperand::MOStore, SSFISize, SSFISize); 6306 6307 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 6308 Ops, array_lengthof(Ops), 6309 Op.getValueType(), MMO); 6310 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 6311 MachinePointerInfo::getFixedStack(SSFI), 6312 false, false, 0); 6313 } 6314 6315 return Result; 6316} 6317 6318// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 6319SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 6320 SelectionDAG &DAG) const { 6321 // This algorithm is not obvious. Here it is in C code, more or less: 6322 /* 6323 double uint64_to_double( uint32_t hi, uint32_t lo ) { 6324 static const __m128i exp = { 0x4330000045300000ULL, 0 }; 6325 static const __m128d bias = { 0x1.0p84, 0x1.0p52 }; 6326 6327 // Copy ints to xmm registers. 6328 __m128i xh = _mm_cvtsi32_si128( hi ); 6329 __m128i xl = _mm_cvtsi32_si128( lo ); 6330 6331 // Combine into low half of a single xmm register. 6332 __m128i x = _mm_unpacklo_epi32( xh, xl ); 6333 __m128d d; 6334 double sd; 6335 6336 // Merge in appropriate exponents to give the integer bits the right 6337 // magnitude. 6338 x = _mm_unpacklo_epi32( x, exp ); 6339 6340 // Subtract away the biases to deal with the IEEE-754 double precision 6341 // implicit 1. 6342 d = _mm_sub_pd( (__m128d) x, bias ); 6343 6344 // All conversions up to here are exact. The correctly rounded result is 6345 // calculated using the current rounding mode using the following 6346 // horizontal add. 6347 d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) ); 6348 _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this 6349 // store doesn't really need to be here (except 6350 // maybe to zero the other double) 6351 return sd; 6352 } 6353 */ 6354 6355 DebugLoc dl = Op.getDebugLoc(); 6356 LLVMContext *Context = DAG.getContext(); 6357 6358 // Build some magic constants. 
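
  // A short worked note on the constants below (easy to verify by hand
  // against the C sketch above): 0x4330000000000000 is the f64 bit pattern
  // of 0x1.0p52 and 0x4530000000000000 is 0x1.0p84. CV0 supplies those two
  // exponent words, which get interleaved above the hi/lo integer halves, so
  // the two doubles become 2^84 + hi * 2^32 and 2^52 + lo; CV1 holds the
  // matching biases (0x1.0p84, 0x1.0p52) that the FSUB below removes.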
6359 std::vector<Constant*> CV0; 6360 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000))); 6361 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000))); 6362 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6363 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6364 Constant *C0 = ConstantVector::get(CV0); 6365 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 6366 6367 std::vector<Constant*> CV1; 6368 CV1.push_back( 6369 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 6370 CV1.push_back( 6371 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 6372 Constant *C1 = ConstantVector::get(CV1); 6373 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 6374 6375 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6376 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6377 Op.getOperand(0), 6378 DAG.getIntPtrConstant(1))); 6379 SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6380 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6381 Op.getOperand(0), 6382 DAG.getIntPtrConstant(0))); 6383 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2); 6384 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 6385 MachinePointerInfo::getConstantPool(), 6386 false, false, 16); 6387 SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0); 6388 SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2); 6389 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 6390 MachinePointerInfo::getConstantPool(), 6391 false, false, 16); 6392 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 6393 6394 // Add the halves; easiest way is to swap them into another reg first. 6395 int ShufMask[2] = { 1, -1 }; 6396 SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, 6397 DAG.getUNDEF(MVT::v2f64), ShufMask); 6398 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub); 6399 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add, 6400 DAG.getIntPtrConstant(0)); 6401} 6402 6403// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 6404SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 6405 SelectionDAG &DAG) const { 6406 DebugLoc dl = Op.getDebugLoc(); 6407 // FP constant to bias correct the final result. 6408 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 6409 MVT::f64); 6410 6411 // Load the 32-bit value into an XMM register. 6412 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6413 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6414 Op.getOperand(0), 6415 DAG.getIntPtrConstant(0))); 6416 6417 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6418 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load), 6419 DAG.getIntPtrConstant(0)); 6420 6421 // Or the load with the bias. 6422 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 6423 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6424 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6425 MVT::v2f64, Load)), 6426 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6427 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6428 MVT::v2f64, Bias))); 6429 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6430 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or), 6431 DAG.getIntPtrConstant(0)); 6432 6433 // Subtract the bias. 6434 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 6435 6436 // Handle final rounding. 
EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64)) {
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  } else if (DestVT.bitsGT(MVT::f64)) {
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
  }

  // The result is already f64; no further rounding is needed.
  return Sub;
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  EVT SrcVT = N0.getValueType();
  EVT DstVT = Op.getValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                          MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
                             getPointerTy());

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
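  // (Worked check on FF: 0x5F800000 as an f32 bit pattern has exponent field
  // 0xBF = 191, i.e. the value 2^(191-127) = 2^64, which is exactly the
  // correction to add back when the i64 bit pattern was really an unsigned
  // value with its top bit set.)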
6517 SDValue Zero = DAG.getIntPtrConstant(0); 6518 SDValue Four = DAG.getIntPtrConstant(4); 6519 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 6520 Zero, Four); 6521 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 6522 6523 // Load the value out, extending it from f32 to f80. 6524 // FIXME: Avoid the extend by constructing the right constant pool? 6525 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, MVT::f80, dl, DAG.getEntryNode(), 6526 FudgePtr, MachinePointerInfo::getConstantPool(), 6527 MVT::f32, false, false, 4); 6528 // Extend everything to 80 bits to force it to be done on x87. 6529 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 6530 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 6531} 6532 6533std::pair<SDValue,SDValue> X86TargetLowering:: 6534FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const { 6535 DebugLoc DL = Op.getDebugLoc(); 6536 6537 EVT DstTy = Op.getValueType(); 6538 6539 if (!IsSigned) { 6540 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 6541 DstTy = MVT::i64; 6542 } 6543 6544 assert(DstTy.getSimpleVT() <= MVT::i64 && 6545 DstTy.getSimpleVT() >= MVT::i16 && 6546 "Unknown FP_TO_SINT to lower!"); 6547 6548 // These are really Legal. 6549 if (DstTy == MVT::i32 && 6550 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 6551 return std::make_pair(SDValue(), SDValue()); 6552 if (Subtarget->is64Bit() && 6553 DstTy == MVT::i64 && 6554 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 6555 return std::make_pair(SDValue(), SDValue()); 6556 6557 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 6558 // stack slot. 6559 MachineFunction &MF = DAG.getMachineFunction(); 6560 unsigned MemSize = DstTy.getSizeInBits()/8; 6561 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 6562 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6563 6564 6565 6566 unsigned Opc; 6567 switch (DstTy.getSimpleVT().SimpleTy) { 6568 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 6569 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 6570 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 6571 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 6572 } 6573 6574 SDValue Chain = DAG.getEntryNode(); 6575 SDValue Value = Op.getOperand(0); 6576 EVT TheVT = Op.getOperand(0).getValueType(); 6577 if (isScalarFPTypeInSSEReg(TheVT)) { 6578 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 6579 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 6580 MachinePointerInfo::getFixedStack(SSFI), 6581 false, false, 0); 6582 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 6583 SDValue Ops[] = { 6584 Chain, StackSlot, DAG.getValueType(TheVT) 6585 }; 6586 6587 MachineMemOperand *MMO = 6588 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6589 MachineMemOperand::MOLoad, MemSize, MemSize); 6590 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 6591 DstTy, MMO); 6592 Chain = Value.getValue(1); 6593 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 6594 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6595 } 6596 6597 MachineMemOperand *MMO = 6598 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6599 MachineMemOperand::MOStore, MemSize, MemSize); 6600 6601 // Build the FP_TO_INT*_IN_MEM 6602 SDValue Ops[] = { Chain, Value, StackSlot }; 6603 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, 
DAG.getVTList(MVT::Other), 6604 Ops, 3, DstTy, MMO); 6605 6606 return std::make_pair(FIST, StackSlot); 6607} 6608 6609SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 6610 SelectionDAG &DAG) const { 6611 if (Op.getValueType().isVector()) 6612 return SDValue(); 6613 6614 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); 6615 SDValue FIST = Vals.first, StackSlot = Vals.second; 6616 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 6617 if (FIST.getNode() == 0) return Op; 6618 6619 // Load the result. 6620 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 6621 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 6622} 6623 6624SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 6625 SelectionDAG &DAG) const { 6626 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false); 6627 SDValue FIST = Vals.first, StackSlot = Vals.second; 6628 assert(FIST.getNode() && "Unexpected failure"); 6629 6630 // Load the result. 6631 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 6632 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 6633} 6634 6635SDValue X86TargetLowering::LowerFABS(SDValue Op, 6636 SelectionDAG &DAG) const { 6637 LLVMContext *Context = DAG.getContext(); 6638 DebugLoc dl = Op.getDebugLoc(); 6639 EVT VT = Op.getValueType(); 6640 EVT EltVT = VT; 6641 if (VT.isVector()) 6642 EltVT = VT.getVectorElementType(); 6643 std::vector<Constant*> CV; 6644 if (EltVT == MVT::f64) { 6645 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 6646 CV.push_back(C); 6647 CV.push_back(C); 6648 } else { 6649 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 6650 CV.push_back(C); 6651 CV.push_back(C); 6652 CV.push_back(C); 6653 CV.push_back(C); 6654 } 6655 Constant *C = ConstantVector::get(CV); 6656 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6657 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6658 MachinePointerInfo::getConstantPool(), 6659 false, false, 16); 6660 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 6661} 6662 6663SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 6664 LLVMContext *Context = DAG.getContext(); 6665 DebugLoc dl = Op.getDebugLoc(); 6666 EVT VT = Op.getValueType(); 6667 EVT EltVT = VT; 6668 if (VT.isVector()) 6669 EltVT = VT.getVectorElementType(); 6670 std::vector<Constant*> CV; 6671 if (EltVT == MVT::f64) { 6672 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 6673 CV.push_back(C); 6674 CV.push_back(C); 6675 } else { 6676 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 6677 CV.push_back(C); 6678 CV.push_back(C); 6679 CV.push_back(C); 6680 CV.push_back(C); 6681 } 6682 Constant *C = ConstantVector::get(CV); 6683 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6684 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6685 MachinePointerInfo::getConstantPool(), 6686 false, false, 16); 6687 if (VT.isVector()) { 6688 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 6689 DAG.getNode(ISD::XOR, dl, MVT::v2i64, 6690 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 6691 Op.getOperand(0)), 6692 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask))); 6693 } else { 6694 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 6695 } 6696} 6697 6698SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 6699 LLVMContext *Context = DAG.getContext(); 6700 SDValue Op0 = Op.getOperand(0); 6701 SDValue Op1 = 
Op.getOperand(1); 6702 DebugLoc dl = Op.getDebugLoc(); 6703 EVT VT = Op.getValueType(); 6704 EVT SrcVT = Op1.getValueType(); 6705 6706 // If second operand is smaller, extend it first. 6707 if (SrcVT.bitsLT(VT)) { 6708 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 6709 SrcVT = VT; 6710 } 6711 // And if it is bigger, shrink it first. 6712 if (SrcVT.bitsGT(VT)) { 6713 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 6714 SrcVT = VT; 6715 } 6716 6717 // At this point the operands and the result should have the same 6718 // type, and that won't be f80 since that is not custom lowered. 6719 6720 // First get the sign bit of second operand. 6721 std::vector<Constant*> CV; 6722 if (SrcVT == MVT::f64) { 6723 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 6724 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 6725 } else { 6726 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 6727 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6728 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6729 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6730 } 6731 Constant *C = ConstantVector::get(CV); 6732 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6733 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 6734 MachinePointerInfo::getConstantPool(), 6735 false, false, 16); 6736 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 6737 6738 // Shift sign bit right or left if the two operands have different types. 6739 if (SrcVT.bitsGT(VT)) { 6740 // Op0 is MVT::f32, Op1 is MVT::f64. 6741 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 6742 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 6743 DAG.getConstant(32, MVT::i32)); 6744 SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit); 6745 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 6746 DAG.getIntPtrConstant(0)); 6747 } 6748 6749 // Clear first operand sign bit. 6750 CV.clear(); 6751 if (VT == MVT::f64) { 6752 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 6753 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 6754 } else { 6755 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 6756 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6757 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6758 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 6759 } 6760 C = ConstantVector::get(CV); 6761 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 6762 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 6763 MachinePointerInfo::getConstantPool(), 6764 false, false, 16); 6765 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 6766 6767 // Or the value with the sign bit. 6768 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 6769} 6770 6771/// Emit nodes that will be selected as "test Op0,Op0", or something 6772/// equivalent. 6773SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 6774 SelectionDAG &DAG) const { 6775 DebugLoc dl = Op.getDebugLoc(); 6776 6777 // CF and OF aren't always set the way we want. Determine which 6778 // of these we need. 
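  // (Illustrative example, not exhaustive: for "if ((int)(a + b) < 0)" the
  // branch wants JL, which reads SF != OF, so the add would have to leave OF
  // in a meaningful state. TEST always clears OF and CF, hence the
  // NeedCF/NeedOF bookkeeping below.)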
bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO:
    NeedOF = true;
    break;
  }

  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  unsigned Opcode = 0;
  unsigned NumOperands = 0;
  switch (Op.getNode()->getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::AND: {
    // If the primary result of the 'and' isn't used, don't bother using
    // X86ISD::AND, because a TEST instruction will be better.
    bool NonFlagUse = false;
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;
      unsigned UOpNo = UI.getOperandNo();
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
        // Look past the truncate.
        UOpNo = User->use_begin().getOperandNo();
        User = *User->use_begin();
      }

      if (User->getOpcode() != ISD::BRCOND &&
          User->getOpcode() != ISD::SETCC &&
          (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
        NonFlagUse = true;
        break;
      }
    }

    if (!NonFlagUse)
      break;
  }
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
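    // (For instance -- a hypothetical but typical case -- "*p |= x" can be
    // matched as a single "orl %eax, (%rcx)" rooted at the store; if we had
    // already claimed this node's EFLAGS result, the op would end up
    // selected twice.)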
6873 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 6874 UE = Op.getNode()->use_end(); UI != UE; ++UI) 6875 if (UI->getOpcode() == ISD::STORE) 6876 goto default_case; 6877 6878 // Otherwise use a regular EFLAGS-setting instruction. 6879 switch (Op.getNode()->getOpcode()) { 6880 default: llvm_unreachable("unexpected operator!"); 6881 case ISD::SUB: Opcode = X86ISD::SUB; break; 6882 case ISD::OR: Opcode = X86ISD::OR; break; 6883 case ISD::XOR: Opcode = X86ISD::XOR; break; 6884 case ISD::AND: Opcode = X86ISD::AND; break; 6885 } 6886 6887 NumOperands = 2; 6888 break; 6889 case X86ISD::ADD: 6890 case X86ISD::SUB: 6891 case X86ISD::INC: 6892 case X86ISD::DEC: 6893 case X86ISD::OR: 6894 case X86ISD::XOR: 6895 case X86ISD::AND: 6896 return SDValue(Op.getNode(), 1); 6897 default: 6898 default_case: 6899 break; 6900 } 6901 6902 if (Opcode == 0) 6903 // Emit a CMP with 0, which is the TEST pattern. 6904 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 6905 DAG.getConstant(0, Op.getValueType())); 6906 6907 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 6908 SmallVector<SDValue, 4> Ops; 6909 for (unsigned i = 0; i != NumOperands; ++i) 6910 Ops.push_back(Op.getOperand(i)); 6911 6912 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 6913 DAG.ReplaceAllUsesWith(Op, New); 6914 return SDValue(New.getNode(), 1); 6915} 6916 6917/// Emit nodes that will be selected as "cmp Op0,Op1", or something 6918/// equivalent. 6919SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 6920 SelectionDAG &DAG) const { 6921 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 6922 if (C->getAPIntValue() == 0) 6923 return EmitTest(Op0, X86CC, DAG); 6924 6925 DebugLoc dl = Op0.getDebugLoc(); 6926 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 6927} 6928 6929/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 6930/// if it's possible. 6931SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 6932 DebugLoc dl, SelectionDAG &DAG) const { 6933 SDValue Op0 = And.getOperand(0); 6934 SDValue Op1 = And.getOperand(1); 6935 if (Op0.getOpcode() == ISD::TRUNCATE) 6936 Op0 = Op0.getOperand(0); 6937 if (Op1.getOpcode() == ISD::TRUNCATE) 6938 Op1 = Op1.getOperand(0); 6939 6940 SDValue LHS, RHS; 6941 if (Op1.getOpcode() == ISD::SHL) 6942 std::swap(Op0, Op1); 6943 if (Op0.getOpcode() == ISD::SHL) { 6944 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 6945 if (And00C->getZExtValue() == 1) { 6946 // If we looked past a truncate, check that it's only truncating away 6947 // known zeros. 6948 unsigned BitWidth = Op0.getValueSizeInBits(); 6949 unsigned AndBitWidth = And.getValueSizeInBits(); 6950 if (BitWidth > AndBitWidth) { 6951 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones; 6952 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones); 6953 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 6954 return SDValue(); 6955 } 6956 LHS = Op1; 6957 RHS = Op0.getOperand(1); 6958 } 6959 } else if (Op1.getOpcode() == ISD::Constant) { 6960 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 6961 SDValue AndLHS = Op0; 6962 if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) { 6963 LHS = AndLHS.getOperand(0); 6964 RHS = AndLHS.getOperand(1); 6965 } 6966 } 6967 6968 if (LHS.getNode()) { 6969 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 6970 // instruction. Since the shift amount is in-range-or-undefined, we know 6971 // that doing a bittest on the i32 value is ok. 
// We extend to i32 because the encoding for the i16 version is larger than
    // the i32 version. Also promote i16 to i32 for performance / code size
    // reasons.
    if (LHS.getValueType() == MVT::i8 ||
        LHS.getValueType() == MVT::i16)
      LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);

    // If the operand types disagree, extend the shift amount to match. Since
    // BT ignores high bits (like shifts) we can use anyextend.
    if (LHS.getValueType() != RHS.getValueType())
      RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);

    SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
    unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, MVT::i8), BT);
  }

  return SDValue();
}

SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND &&
      Op0.hasOneUse() &&
      Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op1)->isNullValue() &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode())
      return NewSetCC;
  }

  // Look for "(setcc) == / != 1" to avoid unnecessary setcc.
  if (Op0.getOpcode() == X86ISD::SETCC &&
      Op1.getOpcode() == ISD::Constant &&
      (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
       cast<ConstantSDNode>(Op1)->isNullValue()) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
    bool Invert = (CC == ISD::SETNE) ^
      cast<ConstantSDNode>(Op1)->isNullValue();
    if (Invert)
      CCode = X86::GetOppositeBranchCondition(CCode);
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
  }

  bool isFP = Op1.getValueType().isFloatingPoint();
  unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);

  // Use sbb x, x to materialize carry bit into a GPR.
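  // (Sketch of the idea, with illustrative registers: after a compare that
  // leaves the carry flag set, "sbbl %eax, %eax" computes %eax - %eax - CF,
  // i.e. 0 or -1; masking with "andl $1, %eax" then yields the boolean.)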
7036 if (X86CC == X86::COND_B) 7037 return DAG.getNode(ISD::AND, dl, MVT::i8, 7038 DAG.getNode(X86ISD::SETCC_CARRY, dl, MVT::i8, 7039 DAG.getConstant(X86CC, MVT::i8), Cond), 7040 DAG.getConstant(1, MVT::i8)); 7041 7042 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7043 DAG.getConstant(X86CC, MVT::i8), Cond); 7044} 7045 7046SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 7047 SDValue Cond; 7048 SDValue Op0 = Op.getOperand(0); 7049 SDValue Op1 = Op.getOperand(1); 7050 SDValue CC = Op.getOperand(2); 7051 EVT VT = Op.getValueType(); 7052 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 7053 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 7054 DebugLoc dl = Op.getDebugLoc(); 7055 7056 if (isFP) { 7057 unsigned SSECC = 8; 7058 EVT VT0 = Op0.getValueType(); 7059 assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64); 7060 unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD; 7061 bool Swap = false; 7062 7063 switch (SetCCOpcode) { 7064 default: break; 7065 case ISD::SETOEQ: 7066 case ISD::SETEQ: SSECC = 0; break; 7067 case ISD::SETOGT: 7068 case ISD::SETGT: Swap = true; // Fallthrough 7069 case ISD::SETLT: 7070 case ISD::SETOLT: SSECC = 1; break; 7071 case ISD::SETOGE: 7072 case ISD::SETGE: Swap = true; // Fallthrough 7073 case ISD::SETLE: 7074 case ISD::SETOLE: SSECC = 2; break; 7075 case ISD::SETUO: SSECC = 3; break; 7076 case ISD::SETUNE: 7077 case ISD::SETNE: SSECC = 4; break; 7078 case ISD::SETULE: Swap = true; 7079 case ISD::SETUGE: SSECC = 5; break; 7080 case ISD::SETULT: Swap = true; 7081 case ISD::SETUGT: SSECC = 6; break; 7082 case ISD::SETO: SSECC = 7; break; 7083 } 7084 if (Swap) 7085 std::swap(Op0, Op1); 7086 7087 // In the two special cases we can't handle, emit two comparisons. 7088 if (SSECC == 8) { 7089 if (SetCCOpcode == ISD::SETUEQ) { 7090 SDValue UNORD, EQ; 7091 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8)); 7092 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); 7093 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); 7094 } 7095 else if (SetCCOpcode == ISD::SETONE) { 7096 SDValue ORD, NEQ; 7097 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); 7098 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8)); 7099 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ); 7100 } 7101 llvm_unreachable("Illegal FP comparison"); 7102 } 7103 // Handle all other FP comparisons here. 7104 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8)); 7105 } 7106 7107 // We are handling one of the integer comparisons here. Since SSE only has 7108 // GT and EQ comparisons for integer, swapping operands and multiple 7109 // operations may be required for some comparisons. 
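  // (Two illustrative rewrites, assuming v4i32 operands: "x <= y" becomes
  // NOT(PCMPGTD(x, y)), and the unsigned "x >u y" becomes
  // PCMPGTD(x ^ 0x80000000, y ^ 0x80000000), since flipping the sign bit
  // turns an unsigned ordering into a signed one.)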
unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
  bool Swap = false, Invert = false, FlipSigns = false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
  case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
  case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
  case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETNE:  Invert = true;
  case ISD::SETEQ:  Opc = EQOpc; break;
  case ISD::SETLT:  Swap = true;
  case ISD::SETGT:  Opc = GTOpc; break;
  case ISD::SETGE:  Swap = true;
  case ISD::SETLE:  Opc = GTOpc; Invert = true; break;
  case ISD::SETULT: Swap = true;
  case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
  case ISD::SETUGE: Swap = true;
  case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    EVT EltVT = VT.getVectorElementType();
    SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
                                      EltVT);
    std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
    SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
                                  SignBits.size());
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

// isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getNode()->getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD ||
       Opc == X86ISD::SUB ||
       Opc == X86ISD::SMUL ||
       Opc == X86ISD::UMUL ||
       Opc == X86ISD::INC ||
       Opc == X86ISD::DEC ||
       Opc == X86ISD::OR ||
       Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  return false;
}

SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Cond  = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }

  // (select (x == 0), -1, 0) -> (sign_bit (x - 1))
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  if (Cond.getOpcode() == X86ISD::SETCC &&
      cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue() == X86::COND_E) {
    SDValue Cmp = Cond.getOperand(1);
    if (Cmp.getOpcode() == X86ISD::CMP) {
      ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      ConstantSDNode *RHSC =
        dyn_cast<ConstantSDNode>(Cmp.getOperand(1).getNode());
      if (N1C && N1C->isAllOnesValue() &&
          N2C && N2C->isNullValue() &&
          RHSC && RHSC->isNullValue()) {
        SDValue CmpOp0 = Cmp.getOperand(0);
        Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                          CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
        return DAG.getNode(X86ISD::SETCC_CARRY, dl, Op.getValueType(),
                           DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
      }
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC ||
      Cond.getOpcode() == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    EVT VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    // Look past the truncate.
    if (Cond.getOpcode() == ISD::TRUNCATE)
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
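    // (E.g. "if (flags & (1 << n))" -- an illustrative source pattern --
    // can become "btl %ecx, %eax" followed by a branch on the carry flag,
    // instead of a variable shift, an AND, and a TEST.)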
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, dl, VTs, Ops, array_lengthof(Ops));
}

// isAndOrOfSetCCs - Return true if node is an ISD::AND or
// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
// from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}

// isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
// the constant 1, where the SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
      Op.getOperand(0).hasOneUse();
  }
  return false;
}

SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD ||
           Cond.getOpcode() == X86ISD::SUB ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC ||
      Cond.getOpcode() == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
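        // (E.g. the IR intrinsic llvm.sadd.with.overflow.i32 yields an
        // X86ISD::ADD whose second result is EFLAGS; branching on COND_O
        // then maps to a plain "jo" after the add, with no extra test.)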
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          X86::CondCode CCode =
            (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = X86::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                                Chain, Dest, CC, Cmp);
            X86::CondCode CCode =
              (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = X86::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, MVT::i8);
            Cond = Cmp;
            addTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
      // It should be transformed during dag combine except when the condition
      // is set by an arithmetic-with-overflow node.
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      addTest = false;
    }
  }

  if (addTest) {
    // Look past the truncate.
    if (Cond.getOpcode() == ISD::TRUNCATE)
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// the correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");
  DebugLoc dl = Op.getDebugLoc();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDValue Flag;

  EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;

  Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);

  Chain = DAG.getNode(X86ISD::MINGW_ALLOCA, dl, NodeTys, Chain, Flag);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);

  SDValue Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getMergeValues(Ops1, 2, dl);
}

SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
                               FIN, MachinePointerInfo(SV), false, false, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                       MVT::i32),
                       FIN, MachinePointerInfo(SV, 4), false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
                       MachinePointerInfo(SV, 8),
                       false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
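  // (For reference, a hedged C-level picture of what these four stores
  // initialize, per the AMD64 ABI:
  //   struct __va_list_tag {
  //     unsigned gp_offset;       // byte 0:  next GP register slot, 0..48
  //     unsigned fp_offset;       // byte 4:  next FP register slot, 48..176
  //     void *overflow_arg_area;  // byte 8:  stack-passed arguments
  //     void *reg_save_area;      // byte 16: spilled argument registers
  //   };
  // which matches the 4/4/8 byte increments applied to FIN.)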
7530 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7531 FIN, DAG.getIntPtrConstant(8)); 7532 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 7533 getPointerTy()); 7534 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 7535 MachinePointerInfo(SV, 16), false, false, 0); 7536 MemOps.push_back(Store); 7537 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 7538 &MemOps[0], MemOps.size()); 7539} 7540 7541SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 7542 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 7543 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!"); 7544 7545 report_fatal_error("VAArgInst is not yet implemented for x86-64!"); 7546 return SDValue(); 7547} 7548 7549SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 7550 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 7551 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 7552 SDValue Chain = Op.getOperand(0); 7553 SDValue DstPtr = Op.getOperand(1); 7554 SDValue SrcPtr = Op.getOperand(2); 7555 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 7556 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 7557 DebugLoc DL = Op.getDebugLoc(); 7558 7559 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 7560 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 7561 false, 7562 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 7563} 7564 7565SDValue 7566X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 7567 DebugLoc dl = Op.getDebugLoc(); 7568 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7569 switch (IntNo) { 7570 default: return SDValue(); // Don't custom lower most intrinsics. 7571 // Comparison intrinsics. 
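  // (A hedged sketch of what the block below produces: e.g. the intrinsic
  // behind _mm_comieq_ss(a, b) becomes an X86ISD::COMI node -- a "comiss"
  // that sets EFLAGS -- followed by a setcc and a zero-extend, roughly:
  //     comiss %xmm1, %xmm0
  //     sete   %al
  //     movzbl %al, %eax
  // The exact condition code comes from TranslateX86CC below.)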
7572   case Intrinsic::x86_sse_comieq_ss:
7573   case Intrinsic::x86_sse_comilt_ss:
7574   case Intrinsic::x86_sse_comile_ss:
7575   case Intrinsic::x86_sse_comigt_ss:
7576   case Intrinsic::x86_sse_comige_ss:
7577   case Intrinsic::x86_sse_comineq_ss:
7578   case Intrinsic::x86_sse_ucomieq_ss:
7579   case Intrinsic::x86_sse_ucomilt_ss:
7580   case Intrinsic::x86_sse_ucomile_ss:
7581   case Intrinsic::x86_sse_ucomigt_ss:
7582   case Intrinsic::x86_sse_ucomige_ss:
7583   case Intrinsic::x86_sse_ucomineq_ss:
7584   case Intrinsic::x86_sse2_comieq_sd:
7585   case Intrinsic::x86_sse2_comilt_sd:
7586   case Intrinsic::x86_sse2_comile_sd:
7587   case Intrinsic::x86_sse2_comigt_sd:
7588   case Intrinsic::x86_sse2_comige_sd:
7589   case Intrinsic::x86_sse2_comineq_sd:
7590   case Intrinsic::x86_sse2_ucomieq_sd:
7591   case Intrinsic::x86_sse2_ucomilt_sd:
7592   case Intrinsic::x86_sse2_ucomile_sd:
7593   case Intrinsic::x86_sse2_ucomigt_sd:
7594   case Intrinsic::x86_sse2_ucomige_sd:
7595   case Intrinsic::x86_sse2_ucomineq_sd: {
7596     unsigned Opc = 0;
7597     ISD::CondCode CC = ISD::SETCC_INVALID;
7598     switch (IntNo) {
7599     default: break;
7600     case Intrinsic::x86_sse_comieq_ss:
7601     case Intrinsic::x86_sse2_comieq_sd:
7602       Opc = X86ISD::COMI;
7603       CC = ISD::SETEQ;
7604       break;
7605     case Intrinsic::x86_sse_comilt_ss:
7606     case Intrinsic::x86_sse2_comilt_sd:
7607       Opc = X86ISD::COMI;
7608       CC = ISD::SETLT;
7609       break;
7610     case Intrinsic::x86_sse_comile_ss:
7611     case Intrinsic::x86_sse2_comile_sd:
7612       Opc = X86ISD::COMI;
7613       CC = ISD::SETLE;
7614       break;
7615     case Intrinsic::x86_sse_comigt_ss:
7616     case Intrinsic::x86_sse2_comigt_sd:
7617       Opc = X86ISD::COMI;
7618       CC = ISD::SETGT;
7619       break;
7620     case Intrinsic::x86_sse_comige_ss:
7621     case Intrinsic::x86_sse2_comige_sd:
7622       Opc = X86ISD::COMI;
7623       CC = ISD::SETGE;
7624       break;
7625     case Intrinsic::x86_sse_comineq_ss:
7626     case Intrinsic::x86_sse2_comineq_sd:
7627       Opc = X86ISD::COMI;
7628       CC = ISD::SETNE;
7629       break;
7630     case Intrinsic::x86_sse_ucomieq_ss:
7631     case Intrinsic::x86_sse2_ucomieq_sd:
7632       Opc = X86ISD::UCOMI;
7633       CC = ISD::SETEQ;
7634       break;
7635     case Intrinsic::x86_sse_ucomilt_ss:
7636     case Intrinsic::x86_sse2_ucomilt_sd:
7637       Opc = X86ISD::UCOMI;
7638       CC = ISD::SETLT;
7639       break;
7640     case Intrinsic::x86_sse_ucomile_ss:
7641     case Intrinsic::x86_sse2_ucomile_sd:
7642       Opc = X86ISD::UCOMI;
7643       CC = ISD::SETLE;
7644       break;
7645     case Intrinsic::x86_sse_ucomigt_ss:
7646     case Intrinsic::x86_sse2_ucomigt_sd:
7647       Opc = X86ISD::UCOMI;
7648       CC = ISD::SETGT;
7649       break;
7650     case Intrinsic::x86_sse_ucomige_ss:
7651     case Intrinsic::x86_sse2_ucomige_sd:
7652       Opc = X86ISD::UCOMI;
7653       CC = ISD::SETGE;
7654       break;
7655     case Intrinsic::x86_sse_ucomineq_ss:
7656     case Intrinsic::x86_sse2_ucomineq_sd:
7657       Opc = X86ISD::UCOMI;
7658       CC = ISD::SETNE;
7659       break;
7660     }
7661
7662     SDValue LHS = Op.getOperand(1);
7663     SDValue RHS = Op.getOperand(2);
7664     unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
7665     assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
7666     SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS);
7667     SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
7668                                 DAG.getConstant(X86CC, MVT::i8), Cond);
7669     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
7670   }
7671   // ptest and testp intrinsics. The intrinsics these come from are designed
7672   // to return an integer value, not just an instruction, so lower them to the
7673   // ptest or testp pattern and a setcc for the result.
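  // (Flag-semantics recap, illustrative: ptestz/vtestz test whether
  //  LHS & RHS is all zeros and read ZF; ptestc/vtestc test whether
  //  ~LHS & RHS is all zeros and read CF; the *nzc forms require both
  //  flags clear -- hence COND_E, COND_B, and COND_A below.)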
7674 case Intrinsic::x86_sse41_ptestz: 7675 case Intrinsic::x86_sse41_ptestc: 7676 case Intrinsic::x86_sse41_ptestnzc: 7677 case Intrinsic::x86_avx_ptestz_256: 7678 case Intrinsic::x86_avx_ptestc_256: 7679 case Intrinsic::x86_avx_ptestnzc_256: 7680 case Intrinsic::x86_avx_vtestz_ps: 7681 case Intrinsic::x86_avx_vtestc_ps: 7682 case Intrinsic::x86_avx_vtestnzc_ps: 7683 case Intrinsic::x86_avx_vtestz_pd: 7684 case Intrinsic::x86_avx_vtestc_pd: 7685 case Intrinsic::x86_avx_vtestnzc_pd: 7686 case Intrinsic::x86_avx_vtestz_ps_256: 7687 case Intrinsic::x86_avx_vtestc_ps_256: 7688 case Intrinsic::x86_avx_vtestnzc_ps_256: 7689 case Intrinsic::x86_avx_vtestz_pd_256: 7690 case Intrinsic::x86_avx_vtestc_pd_256: 7691 case Intrinsic::x86_avx_vtestnzc_pd_256: { 7692 bool IsTestPacked = false; 7693 unsigned X86CC = 0; 7694 switch (IntNo) { 7695 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 7696 case Intrinsic::x86_avx_vtestz_ps: 7697 case Intrinsic::x86_avx_vtestz_pd: 7698 case Intrinsic::x86_avx_vtestz_ps_256: 7699 case Intrinsic::x86_avx_vtestz_pd_256: 7700 IsTestPacked = true; // Fallthrough 7701 case Intrinsic::x86_sse41_ptestz: 7702 case Intrinsic::x86_avx_ptestz_256: 7703 // ZF = 1 7704 X86CC = X86::COND_E; 7705 break; 7706 case Intrinsic::x86_avx_vtestc_ps: 7707 case Intrinsic::x86_avx_vtestc_pd: 7708 case Intrinsic::x86_avx_vtestc_ps_256: 7709 case Intrinsic::x86_avx_vtestc_pd_256: 7710 IsTestPacked = true; // Fallthrough 7711 case Intrinsic::x86_sse41_ptestc: 7712 case Intrinsic::x86_avx_ptestc_256: 7713 // CF = 1 7714 X86CC = X86::COND_B; 7715 break; 7716 case Intrinsic::x86_avx_vtestnzc_ps: 7717 case Intrinsic::x86_avx_vtestnzc_pd: 7718 case Intrinsic::x86_avx_vtestnzc_ps_256: 7719 case Intrinsic::x86_avx_vtestnzc_pd_256: 7720 IsTestPacked = true; // Fallthrough 7721 case Intrinsic::x86_sse41_ptestnzc: 7722 case Intrinsic::x86_avx_ptestnzc_256: 7723 // ZF and CF = 0 7724 X86CC = X86::COND_A; 7725 break; 7726 } 7727 7728 SDValue LHS = Op.getOperand(1); 7729 SDValue RHS = Op.getOperand(2); 7730 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 7731 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 7732 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 7733 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 7734 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 7735 } 7736 7737 // Fix vector shift instructions where the last operand is a non-immediate 7738 // i32 value. 
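  // (Illustrative sketch of the rewrite performed below, for a non-constant n:
  //    x86_sse2_pslli_w v, n
  //      -->  x86_sse2_psll_w v, bitcast(build_vector(n, 0, undef, undef))
  //  The non-immediate hardware forms read the shift amount from the low
  //  64 bits of a vector register, so n is widened with a zeroed upper word.)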
7739   case Intrinsic::x86_sse2_pslli_w:
7740   case Intrinsic::x86_sse2_pslli_d:
7741   case Intrinsic::x86_sse2_pslli_q:
7742   case Intrinsic::x86_sse2_psrli_w:
7743   case Intrinsic::x86_sse2_psrli_d:
7744   case Intrinsic::x86_sse2_psrli_q:
7745   case Intrinsic::x86_sse2_psrai_w:
7746   case Intrinsic::x86_sse2_psrai_d:
7747   case Intrinsic::x86_mmx_pslli_w:
7748   case Intrinsic::x86_mmx_pslli_d:
7749   case Intrinsic::x86_mmx_pslli_q:
7750   case Intrinsic::x86_mmx_psrli_w:
7751   case Intrinsic::x86_mmx_psrli_d:
7752   case Intrinsic::x86_mmx_psrli_q:
7753   case Intrinsic::x86_mmx_psrai_w:
7754   case Intrinsic::x86_mmx_psrai_d: {
7755     SDValue ShAmt = Op.getOperand(2);
7756     if (isa<ConstantSDNode>(ShAmt))
7757       return SDValue();
7758
7759     unsigned NewIntNo = 0;
7760     EVT ShAmtVT = MVT::v4i32;
7761     switch (IntNo) {
7762     case Intrinsic::x86_sse2_pslli_w:
7763       NewIntNo = Intrinsic::x86_sse2_psll_w;
7764       break;
7765     case Intrinsic::x86_sse2_pslli_d:
7766       NewIntNo = Intrinsic::x86_sse2_psll_d;
7767       break;
7768     case Intrinsic::x86_sse2_pslli_q:
7769       NewIntNo = Intrinsic::x86_sse2_psll_q;
7770       break;
7771     case Intrinsic::x86_sse2_psrli_w:
7772       NewIntNo = Intrinsic::x86_sse2_psrl_w;
7773       break;
7774     case Intrinsic::x86_sse2_psrli_d:
7775       NewIntNo = Intrinsic::x86_sse2_psrl_d;
7776       break;
7777     case Intrinsic::x86_sse2_psrli_q:
7778       NewIntNo = Intrinsic::x86_sse2_psrl_q;
7779       break;
7780     case Intrinsic::x86_sse2_psrai_w:
7781       NewIntNo = Intrinsic::x86_sse2_psra_w;
7782       break;
7783     case Intrinsic::x86_sse2_psrai_d:
7784       NewIntNo = Intrinsic::x86_sse2_psra_d;
7785       break;
7786     default: {
7787       ShAmtVT = MVT::v2i32;
7788       switch (IntNo) {
7789       case Intrinsic::x86_mmx_pslli_w:
7790         NewIntNo = Intrinsic::x86_mmx_psll_w;
7791         break;
7792       case Intrinsic::x86_mmx_pslli_d:
7793         NewIntNo = Intrinsic::x86_mmx_psll_d;
7794         break;
7795       case Intrinsic::x86_mmx_pslli_q:
7796         NewIntNo = Intrinsic::x86_mmx_psll_q;
7797         break;
7798       case Intrinsic::x86_mmx_psrli_w:
7799         NewIntNo = Intrinsic::x86_mmx_psrl_w;
7800         break;
7801       case Intrinsic::x86_mmx_psrli_d:
7802         NewIntNo = Intrinsic::x86_mmx_psrl_d;
7803         break;
7804       case Intrinsic::x86_mmx_psrli_q:
7805         NewIntNo = Intrinsic::x86_mmx_psrl_q;
7806         break;
7807       case Intrinsic::x86_mmx_psrai_w:
7808         NewIntNo = Intrinsic::x86_mmx_psra_w;
7809         break;
7810       case Intrinsic::x86_mmx_psrai_d:
7811         NewIntNo = Intrinsic::x86_mmx_psra_d;
7812         break;
7813       default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7814       }
7815       break;
7816     }
7817     }
7818
7819     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
7820     // the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
7821     // to zero.
7822     SDValue ShOps[4];
7823     ShOps[0] = ShAmt;
7824     ShOps[1] = DAG.getConstant(0, MVT::i32);
7825     if (ShAmtVT == MVT::v4i32) {
7826       ShOps[2] = DAG.getUNDEF(MVT::i32);
7827       ShOps[3] = DAG.getUNDEF(MVT::i32);
7828       ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
7829     } else {
7830       ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
7831       // FIXME this must be lowered to get rid of the invalid type.
7832 } 7833 7834 EVT VT = Op.getValueType(); 7835 ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT, ShAmt); 7836 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 7837 DAG.getConstant(NewIntNo, MVT::i32), 7838 Op.getOperand(1), ShAmt); 7839 } 7840 } 7841} 7842 7843SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 7844 SelectionDAG &DAG) const { 7845 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7846 MFI->setReturnAddressIsTaken(true); 7847 7848 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7849 DebugLoc dl = Op.getDebugLoc(); 7850 7851 if (Depth > 0) { 7852 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 7853 SDValue Offset = 7854 DAG.getConstant(TD->getPointerSize(), 7855 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 7856 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 7857 DAG.getNode(ISD::ADD, dl, getPointerTy(), 7858 FrameAddr, Offset), 7859 MachinePointerInfo(), false, false, 0); 7860 } 7861 7862 // Just load the return address. 7863 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 7864 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 7865 RetAddrFI, MachinePointerInfo(), false, false, 0); 7866} 7867 7868SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 7869 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7870 MFI->setFrameAddressIsTaken(true); 7871 7872 EVT VT = Op.getValueType(); 7873 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 7874 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7875 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 7876 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 7877 while (Depth--) 7878 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 7879 MachinePointerInfo(), 7880 false, false, 0); 7881 return FrameAddr; 7882} 7883 7884SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 7885 SelectionDAG &DAG) const { 7886 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 7887} 7888 7889SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 7890 MachineFunction &MF = DAG.getMachineFunction(); 7891 SDValue Chain = Op.getOperand(0); 7892 SDValue Offset = Op.getOperand(1); 7893 SDValue Handler = Op.getOperand(2); 7894 DebugLoc dl = Op.getDebugLoc(); 7895 7896 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 7897 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 7898 getPointerTy()); 7899 unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); 7900 7901 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 7902 DAG.getIntPtrConstant(TD->getPointerSize())); 7903 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 7904 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 7905 false, false, 0); 7906 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 7907 MF.getRegInfo().addLiveOut(StoreAddrReg); 7908 7909 return DAG.getNode(X86ISD::EH_RETURN, dl, 7910 MVT::Other, 7911 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 7912} 7913 7914SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op, 7915 SelectionDAG &DAG) const { 7916 SDValue Root = Op.getOperand(0); 7917 SDValue Trmp = Op.getOperand(1); // trampoline 7918 SDValue FPtr = Op.getOperand(2); // nested function 7919 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 7920 DebugLoc dl = Op.getDebugLoc(); 7921 7922 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 7923 7924 if (Subtarget->is64Bit()) { 7925 SDValue OutChains[6]; 7926 7927 // Large code-model. 7928 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 7929 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 7930 7931 const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10); 7932 const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11); 7933 7934 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 7935 7936 // Load the pointer to the nested function into R11. 7937 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 7938 SDValue Addr = Trmp; 7939 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 7940 Addr, MachinePointerInfo(TrmpAddr), 7941 false, false, 0); 7942 7943 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 7944 DAG.getConstant(2, MVT::i64)); 7945 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 7946 MachinePointerInfo(TrmpAddr, 2), 7947 false, false, 2); 7948 7949 // Load the 'nest' parameter value into R10. 7950 // R10 is specified in X86CallingConv.td 7951 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 7952 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 7953 DAG.getConstant(10, MVT::i64)); 7954 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 7955 Addr, MachinePointerInfo(TrmpAddr, 10), 7956 false, false, 0); 7957 7958 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 7959 DAG.getConstant(12, MVT::i64)); 7960 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 7961 MachinePointerInfo(TrmpAddr, 12), 7962 false, false, 2); 7963 7964 // Jump to the nested function. 7965 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
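  // (Byte map of the finished 64-bit trampoline, an editor's illustration
  //  derived from the stores in this block; the i16 opcode stores are
  //  little-endian, so each REX byte lands before its opcode byte:
  //    offset  0: 49 BB <FPtr, 8 bytes>   movabsq $FPtr, %r11
  //    offset 10: 49 BA <Nest, 8 bytes>   movabsq $Nest, %r10
  //    offset 20: 49 FF E3                jmpq   *%r11          )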
7966 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 7967 DAG.getConstant(20, MVT::i64)); 7968 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 7969 Addr, MachinePointerInfo(TrmpAddr, 20), 7970 false, false, 0); 7971 7972 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 7973 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 7974 DAG.getConstant(22, MVT::i64)); 7975 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 7976 MachinePointerInfo(TrmpAddr, 22), 7977 false, false, 0); 7978 7979 SDValue Ops[] = 7980 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) }; 7981 return DAG.getMergeValues(Ops, 2, dl); 7982 } else { 7983 const Function *Func = 7984 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 7985 CallingConv::ID CC = Func->getCallingConv(); 7986 unsigned NestReg; 7987 7988 switch (CC) { 7989 default: 7990 llvm_unreachable("Unsupported calling convention"); 7991 case CallingConv::C: 7992 case CallingConv::X86_StdCall: { 7993 // Pass 'nest' parameter in ECX. 7994 // Must be kept in sync with X86CallingConv.td 7995 NestReg = X86::ECX; 7996 7997 // Check that ECX wasn't needed by an 'inreg' parameter. 7998 const FunctionType *FTy = Func->getFunctionType(); 7999 const AttrListPtr &Attrs = Func->getAttributes(); 8000 8001 if (!Attrs.isEmpty() && !Func->isVarArg()) { 8002 unsigned InRegCount = 0; 8003 unsigned Idx = 1; 8004 8005 for (FunctionType::param_iterator I = FTy->param_begin(), 8006 E = FTy->param_end(); I != E; ++I, ++Idx) 8007 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 8008 // FIXME: should only count parameters that are lowered to integers. 8009 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 8010 8011 if (InRegCount > 2) { 8012 report_fatal_error("Nest register in use - reduce number of inreg" 8013 " parameters!"); 8014 } 8015 } 8016 break; 8017 } 8018 case CallingConv::X86_FastCall: 8019 case CallingConv::X86_ThisCall: 8020 case CallingConv::Fast: 8021 // Pass 'nest' parameter in EAX. 8022 // Must be kept in sync with X86CallingConv.td 8023 NestReg = X86::EAX; 8024 break; 8025 } 8026 8027 SDValue OutChains[4]; 8028 SDValue Addr, Disp; 8029 8030 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8031 DAG.getConstant(10, MVT::i32)); 8032 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 8033 8034 // This is storing the opcode for MOV32ri. 8035 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 8036 const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg); 8037 OutChains[0] = DAG.getStore(Root, dl, 8038 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 8039 Trmp, MachinePointerInfo(TrmpAddr), 8040 false, false, 0); 8041 8042 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8043 DAG.getConstant(1, MVT::i32)); 8044 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 8045 MachinePointerInfo(TrmpAddr, 1), 8046 false, false, 1); 8047 8048 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
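  // (Corresponding 10-byte layout for the 32-bit trampoline, illustrative
  //  reading of the four stores around this point:
  //    offset 0: B8+reg <Nest, 4 bytes>   movl $Nest, %ecx or %eax
  //    offset 5: E9     <Disp, 4 bytes>   jmp  FPtr  ; Disp = FPtr - (Trmp + 10))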
8049   Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8050                      DAG.getConstant(5, MVT::i32));
8051   OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
8052                               MachinePointerInfo(TrmpAddr, 5),
8053                               false, false, 1);
8054
8055   Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8056                      DAG.getConstant(6, MVT::i32));
8057   OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
8058                               MachinePointerInfo(TrmpAddr, 6),
8059                               false, false, 1);
8060
8061   SDValue Ops[] =
8062     { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) };
8063   return DAG.getMergeValues(Ops, 2, dl);
8064   }
8065 }
8066
8067 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8068                                             SelectionDAG &DAG) const {
8069   /*
8070    The rounding mode is in bits 11:10 of FPCW (the x87 floating-point
8071    control word, saved by the FNSTCW below), and has the following settings:
8072      00 Round to nearest
8073      01 Round to -inf
8074      10 Round to +inf
8075      11 Round to 0
8076
8077    FLT_ROUNDS, on the other hand, expects the following:
8078     -1 Undefined
8079      0 Round to 0
8080      1 Round to nearest
8081      2 Round to +inf
8082      3 Round to -inf
8083
8084    To perform the conversion, we do:
8085      (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
8086   */
8087
8088   MachineFunction &MF = DAG.getMachineFunction();
8089   const TargetMachine &TM = MF.getTarget();
8090   const TargetFrameInfo &TFI = *TM.getFrameInfo();
8091   unsigned StackAlignment = TFI.getStackAlignment();
8092   EVT VT = Op.getValueType();
8093   DebugLoc DL = Op.getDebugLoc();
8094
8095   // Save FP Control Word to stack slot
8096   int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
8097   SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
8098
8099
8100   MachineMemOperand *MMO =
8101     MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
8102                             MachineMemOperand::MOStore, 2, 2);
8103
8104   SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
8105   SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
8106                                           DAG.getVTList(MVT::Other),
8107                                           Ops, 2, MVT::i16, MMO);
8108
8109   // Load FP Control Word from stack slot
8110   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
8111                             MachinePointerInfo(), false, false, 0);
8112
8113   // Transform as necessary
8114   SDValue CWD1 =
8115     DAG.getNode(ISD::SRL, DL, MVT::i16,
8116                 DAG.getNode(ISD::AND, DL, MVT::i16,
8117                             CWD, DAG.getConstant(0x800, MVT::i16)),
8118                 DAG.getConstant(11, MVT::i8));
8119   SDValue CWD2 =
8120     DAG.getNode(ISD::SRL, DL, MVT::i16,
8121                 DAG.getNode(ISD::AND, DL, MVT::i16,
8122                             CWD, DAG.getConstant(0x400, MVT::i16)),
8123                 DAG.getConstant(9, MVT::i8));
8124
8125   SDValue RetVal =
8126     DAG.getNode(ISD::AND, DL, MVT::i16,
8127                 DAG.getNode(ISD::ADD, DL, MVT::i16,
8128                             DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
8129                             DAG.getConstant(1, MVT::i16)),
8130                 DAG.getConstant(3, MVT::i16));
8131
8132
8133   return DAG.getNode((VT.getSizeInBits() < 16 ?
8134                       ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
8135 }
8136
8137 SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
8138   EVT VT = Op.getValueType();
8139   EVT OpVT = VT;
8140   unsigned NumBits = VT.getSizeInBits();
8141   DebugLoc dl = Op.getDebugLoc();
8142
8143   Op = Op.getOperand(0);
8144   if (VT == MVT::i8) {
8145     // Zero extend to i32 since there is not an i8 bsr.
8146     OpVT = MVT::i32;
8147     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
8148   }
8149
8150   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
8151   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
8152   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
8153
8154   // If src is zero (i.e.
bsr sets ZF), returns NumBits. 8155 SDValue Ops[] = { 8156 Op, 8157 DAG.getConstant(NumBits+NumBits-1, OpVT), 8158 DAG.getConstant(X86::COND_E, MVT::i8), 8159 Op.getValue(1) 8160 }; 8161 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 8162 8163 // Finally xor with NumBits-1. 8164 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 8165 8166 if (VT == MVT::i8) 8167 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 8168 return Op; 8169} 8170 8171SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 8172 EVT VT = Op.getValueType(); 8173 EVT OpVT = VT; 8174 unsigned NumBits = VT.getSizeInBits(); 8175 DebugLoc dl = Op.getDebugLoc(); 8176 8177 Op = Op.getOperand(0); 8178 if (VT == MVT::i8) { 8179 OpVT = MVT::i32; 8180 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 8181 } 8182 8183 // Issue a bsf (scan bits forward) which also sets EFLAGS. 8184 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 8185 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 8186 8187 // If src is zero (i.e. bsf sets ZF), returns NumBits. 8188 SDValue Ops[] = { 8189 Op, 8190 DAG.getConstant(NumBits, OpVT), 8191 DAG.getConstant(X86::COND_E, MVT::i8), 8192 Op.getValue(1) 8193 }; 8194 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 8195 8196 if (VT == MVT::i8) 8197 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 8198 return Op; 8199} 8200 8201SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const { 8202 EVT VT = Op.getValueType(); 8203 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply"); 8204 DebugLoc dl = Op.getDebugLoc(); 8205 8206 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32); 8207 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32); 8208 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b ); 8209 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi ); 8210 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b ); 8211 // 8212 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 ); 8213 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 ); 8214 // return AloBlo + AloBhi + AhiBlo; 8215 8216 SDValue A = Op.getOperand(0); 8217 SDValue B = Op.getOperand(1); 8218 8219 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8220 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 8221 A, DAG.getConstant(32, MVT::i32)); 8222 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8223 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 8224 B, DAG.getConstant(32, MVT::i32)); 8225 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8226 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8227 A, B); 8228 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8229 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8230 A, Bhi); 8231 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8232 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8233 Ahi, B); 8234 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8235 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 8236 AloBhi, DAG.getConstant(32, MVT::i32)); 8237 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8238 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 8239 AhiBlo, DAG.getConstant(32, MVT::i32)); 8240 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 8241 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 8242 return Res; 8243} 8244 8245SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const { 8246 EVT VT = Op.getValueType(); 8247 DebugLoc dl = 
Op.getDebugLoc(); 8248 SDValue R = Op.getOperand(0); 8249 8250 LLVMContext *Context = DAG.getContext(); 8251 8252 assert(Subtarget->hasSSE41() && "Cannot lower SHL without SSE4.1 or later"); 8253 8254 if (VT == MVT::v4i32) { 8255 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8256 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 8257 Op.getOperand(1), DAG.getConstant(23, MVT::i32)); 8258 8259 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); 8260 8261 std::vector<Constant*> CV(4, CI); 8262 Constant *C = ConstantVector::get(CV); 8263 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8264 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8265 MachinePointerInfo::getConstantPool(), 8266 false, false, 16); 8267 8268 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 8269 Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op); 8270 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 8271 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 8272 } 8273 if (VT == MVT::v16i8) { 8274 // a = a << 5; 8275 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8276 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 8277 Op.getOperand(1), DAG.getConstant(5, MVT::i32)); 8278 8279 ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15)); 8280 ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63)); 8281 8282 std::vector<Constant*> CVM1(16, CM1); 8283 std::vector<Constant*> CVM2(16, CM2); 8284 Constant *C = ConstantVector::get(CVM1); 8285 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8286 SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8287 MachinePointerInfo::getConstantPool(), 8288 false, false, 16); 8289 8290 // r = pblendv(r, psllw(r & (char16)15, 4), a); 8291 M = DAG.getNode(ISD::AND, dl, VT, R, M); 8292 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8293 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 8294 DAG.getConstant(4, MVT::i32)); 8295 R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8296 DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), 8297 R, M, Op); 8298 // a += a 8299 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 8300 8301 C = ConstantVector::get(CVM2); 8302 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8303 M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8304 MachinePointerInfo::getConstantPool(), 8305 false, false, 16); 8306 8307 // r = pblendv(r, psllw(r & (char16)63, 2), a); 8308 M = DAG.getNode(ISD::AND, dl, VT, R, M); 8309 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8310 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 8311 DAG.getConstant(2, MVT::i32)); 8312 R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8313 DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), 8314 R, M, Op); 8315 // a += a 8316 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 8317 8318 // return pblendv(r, r+r, a); 8319 R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8320 DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), 8321 R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op); 8322 return R; 8323 } 8324 return SDValue(); 8325} 8326 8327SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 8328 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 8329 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 8330 // looks for this combo and may remove the "setcc" instruction if the "setcc" 8331 // has only one use. 
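  // (Illustrative example: an i32 ISD::SADDO is rebuilt below as
  //    Sum = X86ISD::ADD lhs, rhs        ; second result is EFLAGS
  //    Ovf = X86ISD::SETCC COND_O, Sum.getValue(1)
  //  and all uses of the original overflow value are redirected to Ovf.)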
8332   SDNode *N = Op.getNode();
8333   SDValue LHS = N->getOperand(0);
8334   SDValue RHS = N->getOperand(1);
8335   unsigned BaseOp = 0;
8336   unsigned Cond = 0;
8337   DebugLoc dl = Op.getDebugLoc();
8338
8339   switch (Op.getOpcode()) {
8340   default: llvm_unreachable("Unknown ovf instruction!");
8341   case ISD::SADDO:
8342     // An add of one will be selected as an INC. Note that INC doesn't
8343     // set CF, so we can't do this for UADDO.
8344     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
8345       if (C->getAPIntValue() == 1) {
8346         BaseOp = X86ISD::INC;
8347         Cond = X86::COND_O;
8348         break;
8349       }
8350     BaseOp = X86ISD::ADD;
8351     Cond = X86::COND_O;
8352     break;
8353   case ISD::UADDO:
8354     BaseOp = X86ISD::ADD;
8355     Cond = X86::COND_B;
8356     break;
8357   case ISD::SSUBO:
8358     // A subtract of one will be selected as a DEC. Note that DEC doesn't
8359     // set CF, so we can't do this for USUBO.
8360     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
8361       if (C->getAPIntValue() == 1) {
8362         BaseOp = X86ISD::DEC;
8363         Cond = X86::COND_O;
8364         break;
8365       }
8366     BaseOp = X86ISD::SUB;
8367     Cond = X86::COND_O;
8368     break;
8369   case ISD::USUBO:
8370     BaseOp = X86ISD::SUB;
8371     Cond = X86::COND_B;
8372     break;
8373   case ISD::SMULO:
8374     BaseOp = X86ISD::SMUL;
8375     Cond = X86::COND_O;
8376     break;
8377   case ISD::UMULO:
8378     BaseOp = X86ISD::UMUL;
8379     Cond = X86::COND_B;
8380     break;
8381   }
8382
8383   // Also sets EFLAGS.
8384   SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
8385   SDValue Sum = DAG.getNode(BaseOp, dl, VTs, LHS, RHS);
8386
8387   SDValue SetCC =
8388     DAG.getNode(X86ISD::SETCC, dl, N->getValueType(1),
8389                 DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1));
8390
8391   DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
8392   return Sum;
8393 }
8394
8395 SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
8396   DebugLoc dl = Op.getDebugLoc();
8397
8398   if (!Subtarget->hasSSE2()) {
8399     SDValue Chain = Op.getOperand(0);
8400     SDValue Zero = DAG.getConstant(0,
8401                                    Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
8402     SDValue Ops[] = {
8403       DAG.getRegister(X86::ESP, MVT::i32), // Base
8404       DAG.getTargetConstant(1, MVT::i8),   // Scale
8405       DAG.getRegister(0, MVT::i32),        // Index
8406       DAG.getTargetConstant(0, MVT::i32),  // Disp
8407       DAG.getRegister(0, MVT::i32),        // Segment.
8408 Zero, 8409 Chain 8410 }; 8411 SDNode *Res = 8412 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 8413 array_lengthof(Ops)); 8414 return SDValue(Res, 0); 8415 } 8416 8417 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 8418 if (!isDev) 8419 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 8420 8421 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 8422 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 8423 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 8424 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 8425 8426 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 8427 if (!Op1 && !Op2 && !Op3 && Op4) 8428 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 8429 8430 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 8431 if (Op1 && !Op2 && !Op3 && !Op4) 8432 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 8433 8434 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 8435 // (MFENCE)>; 8436 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 8437} 8438 8439SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 8440 EVT T = Op.getValueType(); 8441 DebugLoc DL = Op.getDebugLoc(); 8442 unsigned Reg = 0; 8443 unsigned size = 0; 8444 switch(T.getSimpleVT().SimpleTy) { 8445 default: 8446 assert(false && "Invalid value type!"); 8447 case MVT::i8: Reg = X86::AL; size = 1; break; 8448 case MVT::i16: Reg = X86::AX; size = 2; break; 8449 case MVT::i32: Reg = X86::EAX; size = 4; break; 8450 case MVT::i64: 8451 assert(Subtarget->is64Bit() && "Node not type legal!"); 8452 Reg = X86::RAX; size = 8; 8453 break; 8454 } 8455 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 8456 Op.getOperand(2), SDValue()); 8457 SDValue Ops[] = { cpIn.getValue(0), 8458 Op.getOperand(1), 8459 Op.getOperand(3), 8460 DAG.getTargetConstant(size, MVT::i8), 8461 cpIn.getValue(1) }; 8462 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8463 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 8464 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 8465 Ops, 5, T, MMO); 8466 SDValue cpOut = 8467 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 8468 return cpOut; 8469} 8470 8471SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 8472 SelectionDAG &DAG) const { 8473 assert(Subtarget->is64Bit() && "Result not type legalized?"); 8474 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8475 SDValue TheChain = Op.getOperand(0); 8476 DebugLoc dl = Op.getDebugLoc(); 8477 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 8478 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 8479 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 8480 rax.getValue(2)); 8481 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 8482 DAG.getConstant(32, MVT::i8)); 8483 SDValue Ops[] = { 8484 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 8485 rdx.getValue(1) 8486 }; 8487 return DAG.getMergeValues(Ops, 2, dl); 8488} 8489 8490SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op, 8491 SelectionDAG &DAG) const { 8492 EVT SrcVT = Op.getOperand(0).getValueType(); 8493 EVT DstVT = Op.getValueType(); 8494 assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() && 8495 Subtarget->hasMMX() && !DisableMMX) && 8496 
"Unexpected custom BIT_CONVERT"); 8497 assert((DstVT == MVT::i64 || 8498 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 8499 "Unexpected custom BIT_CONVERT"); 8500 // i64 <=> MMX conversions are Legal. 8501 if (SrcVT==MVT::i64 && DstVT.isVector()) 8502 return Op; 8503 if (DstVT==MVT::i64 && SrcVT.isVector()) 8504 return Op; 8505 // MMX <=> MMX conversions are Legal. 8506 if (SrcVT.isVector() && DstVT.isVector()) 8507 return Op; 8508 // All other conversions need to be expanded. 8509 return SDValue(); 8510} 8511SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 8512 SDNode *Node = Op.getNode(); 8513 DebugLoc dl = Node->getDebugLoc(); 8514 EVT T = Node->getValueType(0); 8515 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 8516 DAG.getConstant(0, T), Node->getOperand(2)); 8517 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 8518 cast<AtomicSDNode>(Node)->getMemoryVT(), 8519 Node->getOperand(0), 8520 Node->getOperand(1), negOp, 8521 cast<AtomicSDNode>(Node)->getSrcValue(), 8522 cast<AtomicSDNode>(Node)->getAlignment()); 8523} 8524 8525/// LowerOperation - Provide custom lowering hooks for some operations. 8526/// 8527SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8528 switch (Op.getOpcode()) { 8529 default: llvm_unreachable("Should not custom lower this!"); 8530 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 8531 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 8532 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 8533 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8534 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 8535 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8536 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8537 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 8538 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8539 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8540 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8541 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8542 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 8543 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8544 case ISD::SHL_PARTS: 8545 case ISD::SRA_PARTS: 8546 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 8547 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 8548 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 8549 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 8550 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 8551 case ISD::FABS: return LowerFABS(Op, DAG); 8552 case ISD::FNEG: return LowerFNEG(Op, DAG); 8553 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 8554 case ISD::SETCC: return LowerSETCC(Op, DAG); 8555 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 8556 case ISD::SELECT: return LowerSELECT(Op, DAG); 8557 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 8558 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8559 case ISD::VASTART: return LowerVASTART(Op, DAG); 8560 case ISD::VAARG: return LowerVAARG(Op, DAG); 8561 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 8562 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8563 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8564 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8565 case ISD::FRAME_TO_ARGS_OFFSET: 8566 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 8567 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 
8568 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 8569 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 8570 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8571 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 8572 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 8573 case ISD::MUL: return LowerMUL_V2I64(Op, DAG); 8574 case ISD::SHL: return LowerSHL(Op, DAG); 8575 case ISD::SADDO: 8576 case ISD::UADDO: 8577 case ISD::SSUBO: 8578 case ISD::USUBO: 8579 case ISD::SMULO: 8580 case ISD::UMULO: return LowerXALUO(Op, DAG); 8581 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 8582 case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG); 8583 } 8584} 8585 8586void X86TargetLowering:: 8587ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 8588 SelectionDAG &DAG, unsigned NewOp) const { 8589 EVT T = Node->getValueType(0); 8590 DebugLoc dl = Node->getDebugLoc(); 8591 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 8592 8593 SDValue Chain = Node->getOperand(0); 8594 SDValue In1 = Node->getOperand(1); 8595 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 8596 Node->getOperand(2), DAG.getIntPtrConstant(0)); 8597 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 8598 Node->getOperand(2), DAG.getIntPtrConstant(1)); 8599 SDValue Ops[] = { Chain, In1, In2L, In2H }; 8600 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8601 SDValue Result = 8602 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 8603 cast<MemSDNode>(Node)->getMemOperand()); 8604 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 8605 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 8606 Results.push_back(Result.getValue(2)); 8607} 8608 8609/// ReplaceNodeResults - Replace a node with an illegal result type 8610/// with a new node built out of custom code. 8611void X86TargetLowering::ReplaceNodeResults(SDNode *N, 8612 SmallVectorImpl<SDValue>&Results, 8613 SelectionDAG &DAG) const { 8614 DebugLoc dl = N->getDebugLoc(); 8615 switch (N->getOpcode()) { 8616 default: 8617 assert(false && "Do not know how to custom type legalize this operation!"); 8618 return; 8619 case ISD::FP_TO_SINT: { 8620 std::pair<SDValue,SDValue> Vals = 8621 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 8622 SDValue FIST = Vals.first, StackSlot = Vals.second; 8623 if (FIST.getNode() != 0) { 8624 EVT VT = N->getValueType(0); 8625 // Return a load from the stack slot. 8626 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 8627 MachinePointerInfo(), false, false, 0)); 8628 } 8629 return; 8630 } 8631 case ISD::READCYCLECOUNTER: { 8632 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8633 SDValue TheChain = N->getOperand(0); 8634 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 8635 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 8636 rd.getValue(1)); 8637 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 8638 eax.getValue(2)); 8639 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 
8640 SDValue Ops[] = { eax, edx }; 8641 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 8642 Results.push_back(edx.getValue(1)); 8643 return; 8644 } 8645 case ISD::ATOMIC_CMP_SWAP: { 8646 EVT T = N->getValueType(0); 8647 assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap"); 8648 SDValue cpInL, cpInH; 8649 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 8650 DAG.getConstant(0, MVT::i32)); 8651 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 8652 DAG.getConstant(1, MVT::i32)); 8653 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue()); 8654 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH, 8655 cpInL.getValue(1)); 8656 SDValue swapInL, swapInH; 8657 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 8658 DAG.getConstant(0, MVT::i32)); 8659 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 8660 DAG.getConstant(1, MVT::i32)); 8661 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL, 8662 cpInH.getValue(1)); 8663 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH, 8664 swapInL.getValue(1)); 8665 SDValue Ops[] = { swapInH.getValue(0), 8666 N->getOperand(1), 8667 swapInH.getValue(1) }; 8668 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 8669 SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, 3); 8670 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX, 8671 MVT::i32, Result.getValue(1)); 8672 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX, 8673 MVT::i32, cpOutL.getValue(2)); 8674 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 8675 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 8676 Results.push_back(cpOutH.getValue(1)); 8677 return; 8678 } 8679 case ISD::ATOMIC_LOAD_ADD: 8680 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG); 8681 return; 8682 case ISD::ATOMIC_LOAD_AND: 8683 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG); 8684 return; 8685 case ISD::ATOMIC_LOAD_NAND: 8686 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG); 8687 return; 8688 case ISD::ATOMIC_LOAD_OR: 8689 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG); 8690 return; 8691 case ISD::ATOMIC_LOAD_SUB: 8692 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG); 8693 return; 8694 case ISD::ATOMIC_LOAD_XOR: 8695 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG); 8696 return; 8697 case ISD::ATOMIC_SWAP: 8698 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG); 8699 return; 8700 } 8701} 8702 8703const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 8704 switch (Opcode) { 8705 default: return NULL; 8706 case X86ISD::BSF: return "X86ISD::BSF"; 8707 case X86ISD::BSR: return "X86ISD::BSR"; 8708 case X86ISD::SHLD: return "X86ISD::SHLD"; 8709 case X86ISD::SHRD: return "X86ISD::SHRD"; 8710 case X86ISD::FAND: return "X86ISD::FAND"; 8711 case X86ISD::FOR: return "X86ISD::FOR"; 8712 case X86ISD::FXOR: return "X86ISD::FXOR"; 8713 case X86ISD::FSRL: return "X86ISD::FSRL"; 8714 case X86ISD::FILD: return "X86ISD::FILD"; 8715 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 8716 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 8717 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 8718 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 8719 case X86ISD::FLD: return 
"X86ISD::FLD"; 8720 case X86ISD::FST: return "X86ISD::FST"; 8721 case X86ISD::CALL: return "X86ISD::CALL"; 8722 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 8723 case X86ISD::BT: return "X86ISD::BT"; 8724 case X86ISD::CMP: return "X86ISD::CMP"; 8725 case X86ISD::COMI: return "X86ISD::COMI"; 8726 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 8727 case X86ISD::SETCC: return "X86ISD::SETCC"; 8728 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 8729 case X86ISD::CMOV: return "X86ISD::CMOV"; 8730 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 8731 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 8732 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 8733 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 8734 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 8735 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 8736 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 8737 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 8738 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 8739 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 8740 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 8741 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 8742 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 8743 case X86ISD::FMAX: return "X86ISD::FMAX"; 8744 case X86ISD::FMIN: return "X86ISD::FMIN"; 8745 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 8746 case X86ISD::FRCP: return "X86ISD::FRCP"; 8747 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 8748 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 8749 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 8750 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 8751 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 8752 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 8753 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 8754 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 8755 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 8756 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 8757 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 8758 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 8759 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 8760 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 8761 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 8762 case X86ISD::VSHL: return "X86ISD::VSHL"; 8763 case X86ISD::VSRL: return "X86ISD::VSRL"; 8764 case X86ISD::CMPPD: return "X86ISD::CMPPD"; 8765 case X86ISD::CMPPS: return "X86ISD::CMPPS"; 8766 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB"; 8767 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW"; 8768 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD"; 8769 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ"; 8770 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB"; 8771 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW"; 8772 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD"; 8773 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ"; 8774 case X86ISD::ADD: return "X86ISD::ADD"; 8775 case X86ISD::SUB: return "X86ISD::SUB"; 8776 case X86ISD::SMUL: return "X86ISD::SMUL"; 8777 case X86ISD::UMUL: return "X86ISD::UMUL"; 8778 case X86ISD::INC: return "X86ISD::INC"; 8779 case X86ISD::DEC: return "X86ISD::DEC"; 8780 case X86ISD::OR: return "X86ISD::OR"; 8781 case X86ISD::XOR: return "X86ISD::XOR"; 8782 case X86ISD::AND: return "X86ISD::AND"; 8783 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 8784 case X86ISD::PTEST: return "X86ISD::PTEST"; 8785 case X86ISD::TESTP: return "X86ISD::TESTP"; 8786 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 8787 case 
X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 8788 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 8789 case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD"; 8790 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 8791 case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD"; 8792 case X86ISD::SHUFPS: return "X86ISD::SHUFPS"; 8793 case X86ISD::SHUFPD: return "X86ISD::SHUFPD"; 8794 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 8795 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 8796 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 8797 case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD"; 8798 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 8799 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 8800 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 8801 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 8802 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 8803 case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD"; 8804 case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD"; 8805 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 8806 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 8807 case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS"; 8808 case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD"; 8809 case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS"; 8810 case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD"; 8811 case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW"; 8812 case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD"; 8813 case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ"; 8814 case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ"; 8815 case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW"; 8816 case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD"; 8817 case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ"; 8818 case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ"; 8819 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 8820 case X86ISD::MINGW_ALLOCA: return "X86ISD::MINGW_ALLOCA"; 8821 } 8822} 8823 8824// isLegalAddressingMode - Return true if the addressing mode represented 8825// by AM is legal for this target, for a load/store of the specified type. 8826bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 8827 const Type *Ty) const { 8828 // X86 supports extremely general addressing modes. 8829 CodeModel::Model M = getTargetMachine().getCodeModel(); 8830 Reloc::Model R = getTargetMachine().getRelocationModel(); 8831 8832 // X86 allows a sign-extended 32-bit immediate field as a displacement. 8833 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 8834 return false; 8835 8836 if (AM.BaseGV) { 8837 unsigned GVFlags = 8838 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 8839 8840 // If a reference to this global requires an extra load, we can't fold it. 8841 if (isGlobalStubReference(GVFlags)) 8842 return false; 8843 8844 // If BaseGV requires a register for the PIC base, we cannot also have a 8845 // BaseReg specified. 8846 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 8847 return false; 8848 8849 // If lower 4G is not available, then we must use rip-relative addressing. 8850 if ((M != CodeModel::Small || R != Reloc::Static) && 8851 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 8852 return false; 8853 } 8854 8855 switch (AM.Scale) { 8856 case 0: 8857 case 1: 8858 case 2: 8859 case 4: 8860 case 8: 8861 // These scales always work. 8862 break; 8863 case 3: 8864 case 5: 8865 case 9: 8866 // These scales are formed with basereg+scalereg. Only accept if there is 8867 // no basereg yet. 
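  // (e.g. a scale of 3 is encoded as base + 2*index, as in
  //  "leal (%eax,%eax,2), %edx" for 3*%eax, so the base-register slot is
  //  already consumed by the scaled address itself.)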
8868 if (AM.HasBaseReg) 8869 return false; 8870 break; 8871 default: // Other stuff never works. 8872 return false; 8873 } 8874 8875 return true; 8876} 8877 8878 8879bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 8880 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 8881 return false; 8882 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 8883 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 8884 if (NumBits1 <= NumBits2) 8885 return false; 8886 return true; 8887} 8888 8889bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 8890 if (!VT1.isInteger() || !VT2.isInteger()) 8891 return false; 8892 unsigned NumBits1 = VT1.getSizeInBits(); 8893 unsigned NumBits2 = VT2.getSizeInBits(); 8894 if (NumBits1 <= NumBits2) 8895 return false; 8896 return true; 8897} 8898 8899bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const { 8900 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 8901 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 8902} 8903 8904bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 8905 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 8906 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 8907} 8908 8909bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 8910 // i16 instructions are longer (0x66 prefix) and potentially slower. 8911 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 8912} 8913 8914/// isShuffleMaskLegal - Targets can use this to indicate that they only 8915/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 8916/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 8917/// are assumed to be legal. 8918bool 8919X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 8920 EVT VT) const { 8921 // Very little shuffling can be done for 64-bit vectors right now. 8922 if (VT.getSizeInBits() == 64) 8923 return isPALIGNRMask(M, VT, Subtarget->hasSSSE3()); 8924 8925 // FIXME: pshufb, blends, shifts. 8926 return (VT.getVectorNumElements() == 2 || 8927 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 8928 isMOVLMask(M, VT) || 8929 isSHUFPMask(M, VT) || 8930 isPSHUFDMask(M, VT) || 8931 isPSHUFHWMask(M, VT) || 8932 isPSHUFLWMask(M, VT) || 8933 isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) || 8934 isUNPCKLMask(M, VT) || 8935 isUNPCKHMask(M, VT) || 8936 isUNPCKL_v_undef_Mask(M, VT) || 8937 isUNPCKH_v_undef_Mask(M, VT)); 8938} 8939 8940bool 8941X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 8942 EVT VT) const { 8943 unsigned NumElts = VT.getVectorNumElements(); 8944 // FIXME: This collection of masks seems suspect. 
8945 if (NumElts == 2) 8946 return true; 8947 if (NumElts == 4 && VT.getSizeInBits() == 128) { 8948 return (isMOVLMask(Mask, VT) || 8949 isCommutedMOVLMask(Mask, VT, true) || 8950 isSHUFPMask(Mask, VT) || 8951 isCommutedSHUFPMask(Mask, VT)); 8952 } 8953 return false; 8954} 8955 8956//===----------------------------------------------------------------------===// 8957// X86 Scheduler Hooks 8958//===----------------------------------------------------------------------===// 8959 8960// private utility function 8961MachineBasicBlock * 8962X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 8963 MachineBasicBlock *MBB, 8964 unsigned regOpc, 8965 unsigned immOpc, 8966 unsigned LoadOpc, 8967 unsigned CXchgOpc, 8968 unsigned notOpc, 8969 unsigned EAXreg, 8970 TargetRegisterClass *RC, 8971 bool invSrc) const { 8972 // For the atomic bitwise operator, we generate 8973 // thisMBB: 8974 // newMBB: 8975 // ld t1 = [bitinstr.addr] 8976 // op t2 = t1, [bitinstr.val] 8977 // mov EAX = t1 8978 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 8979 // bz newMBB 8980 // fallthrough -->nextMBB 8981 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 8982 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 8983 MachineFunction::iterator MBBIter = MBB; 8984 ++MBBIter; 8985 8986 /// First build the CFG 8987 MachineFunction *F = MBB->getParent(); 8988 MachineBasicBlock *thisMBB = MBB; 8989 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 8990 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 8991 F->insert(MBBIter, newMBB); 8992 F->insert(MBBIter, nextMBB); 8993 8994 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 8995 nextMBB->splice(nextMBB->begin(), thisMBB, 8996 llvm::next(MachineBasicBlock::iterator(bInstr)), 8997 thisMBB->end()); 8998 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 8999 9000 // Update thisMBB to fall through to newMBB 9001 thisMBB->addSuccessor(newMBB); 9002 9003 // newMBB jumps to itself and fall through to nextMBB 9004 newMBB->addSuccessor(nextMBB); 9005 newMBB->addSuccessor(newMBB); 9006 9007 // Insert instructions into newMBB based on incoming instruction 9008 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 && 9009 "unexpected number of operands"); 9010 DebugLoc dl = bInstr->getDebugLoc(); 9011 MachineOperand& destOper = bInstr->getOperand(0); 9012 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 9013 int numArgs = bInstr->getNumOperands() - 1; 9014 for (int i=0; i < numArgs; ++i) 9015 argOpers[i] = &bInstr->getOperand(i+1); 9016 9017 // x86 address has 4 operands: base, index, scale, and displacement 9018 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 9019 int valArgIndx = lastAddrIndx + 1; 9020 9021 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 9022 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1); 9023 for (int i=0; i <= lastAddrIndx; ++i) 9024 (*MIB).addOperand(*argOpers[i]); 9025 9026 unsigned tt = F->getRegInfo().createVirtualRegister(RC); 9027 if (invSrc) { 9028 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1); 9029 } 9030 else 9031 tt = t1; 9032 9033 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 9034 assert((argOpers[valArgIndx]->isReg() || 9035 argOpers[valArgIndx]->isImm()) && 9036 "invalid operand"); 9037 if (argOpers[valArgIndx]->isReg()) 9038 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); 9039 else 9040 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); 9041 MIB.addReg(tt); 9042 
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t2);
  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperands");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(EAXreg);

  // insert branch
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function: 64-bit atomics on a 32-bit host.
MachineBasicBlock *
X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpcL,
                                                       unsigned regOpcH,
                                                       unsigned immOpcL,
                                                       unsigned immOpcH,
                                                       bool invSrc) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB (instructions are in pairs, except cmpxchg8b)
  //     ld t1,t2 = [bitinstr.addr]
  //   newMBB:
  //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
  //     op  t5, t6 <- out1, out2, [bitinstr.val]
  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
  //     mov ECX, EBX <- t5, t6
  //     mov EAX, EDX <- t1, t2
  //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
  //     mov t3, t4 <- EAX, EDX
  //     bz  newMBB
  //     result in out1, out2
  //     fallthrough -->nextMBB

  const TargetRegisterClass *RC = X86::GR32RegisterClass;
  const unsigned LoadOpc = X86::MOV32rm;
  const unsigned NotOpc = X86::NOT32r;
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = bInstr->getDebugLoc();
  // Insert instructions into newMBB based on incoming instruction
  // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
         "unexpected number of operands");
  MachineOperand& dest1Oper = bInstr->getOperand(0);
  MachineOperand& dest2Oper = bInstr->getOperand(1);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
    argOpers[i] = &bInstr->getOperand(i+2);

    // We use some of the operands multiple times, so conservatively just
    // clear any kill flags that might be present.
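    // (A surviving kill flag would wrongly mark a register dead at its first
    // use here even though later instructions in the loop block read it again.)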
    if (argOpers[i]->isReg() && argOpers[i]->isUse())
      argOpers[i]->setIsKill(false);
  }

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
  // add 4 to displacement.
  for (int i=0; i <= lastAddrIndx-2; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
    newOp3.setImm(newOp3.getImm()+4);
  else
    newOp3.setOffset(newOp3.getOffset()+4);
  (*MIB).addOperand(newOp3);
  (*MIB).addOperand(*argOpers[lastAddrIndx]);

  // t3/4 are defined later, at the bottom of the loop
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
    .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
    .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);

  // The subsequent operations should be using the destination registers of
  // the PHI instructions.
  if (invSrc) {
    t1 = F->getRegInfo().createVirtualRegister(RC);
    t2 = F->getRegInfo().createVirtualRegister(RC);
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg());
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg());
  } else {
    t1 = dest1Oper.getReg();
    t2 = dest2Oper.getReg();
  }

  int valArgIndx = lastAddrIndx + 1;
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
  if (regOpcL != X86::MOV32rr)
    MIB.addReg(t1);
  (*MIB).addOperand(*argOpers[valArgIndx]);
  assert(argOpers[valArgIndx + 1]->isReg() ==
         argOpers[valArgIndx]->isReg());
  assert(argOpers[valArgIndx + 1]->isImm() ==
         argOpers[valArgIndx]->isImm());
  if (argOpers[valArgIndx + 1]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
  if (regOpcH != X86::MOV32rr)
    MIB.addReg(t2);
  (*MIB).addOperand(*argOpers[valArgIndx + 1]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
  MIB.addReg(t2);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
  MIB.addReg(t5);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
  MIB.addReg(t6);

  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperands");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

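  // cmpxchg8b leaves the current memory value in EDX:EAX whether or not the
  // exchange succeeded, so copying it into t3/t4 feeds the retry PHIs above.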
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
  MIB.addReg(X86::EAX);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
  MIB.addReg(X86::EDX);

  // insert branch
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
                                                      MachineBasicBlock *MBB,
                                                      unsigned cmovOpc) const {
  // For the atomic min/max operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld t1 = [min/max.addr]
  //     mov t2 = [min/max.val]
  //     cmp  t1, t2
  //     cmov[cond] t2 = t1
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz   newMBB
  //     fallthrough -->nextMBB
  //
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(mInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = mInstr->getDebugLoc();
  // Insert instructions into newMBB based on incoming instruction
  assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  MachineOperand& destOper = mInstr->getOperand(0);
  MachineOperand* argOpers[2 + X86::AddrNumOperands];
  int numArgs = mInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &mInstr->getOperand(i+1);

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // We only support register and immediate values
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");

  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
  MIB.addReg(t1);
  MIB.addReg(t2);

  // Generate cmov
  unsigned t3 =
    F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MIB = BuildMI(newMBB, dl, TII->get(cmovOpc), t3);
  MIB.addReg(t2);
  MIB.addReg(t1);

  // Compare and exchange if no one has modified the memory location.
  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperands");
  (*MIB).setMemRefs(mInstr->memoperands_begin(),
                    mInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(X86::EAX);

  // insert branch
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  mInstr->eraseFromParent();   // The pseudo instruction is gone now.
  return nextMBB;
}

// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX, all of this code can be replaced with that
// in the .td file.
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
                            unsigned numArgs, bool memArg) const {

  assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
         "Target must have SSE4.2 or AVX features enabled");

  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  unsigned Opc;

  if (!Subtarget->hasAVX()) {
    if (memArg)
      Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
  } else {
    if (memArg)
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
  }

  MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(Opc));

  for (unsigned i = 0; i < numArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i+1);

    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }

  BuildMI(BB, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();

  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                                 MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
          MachineMemOperand::MOStore,
          /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.

  return EndMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const MachineFunction *MF = BB->getParent();
  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
  BitVector ReservedRegs = TRI->getReservedRegs(*MF);

  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue;
    unsigned Reg = MO.getReg();
    if (Reg != X86::EFLAGS) continue;
    copy0MBB->addLiveIn(Reg);
    sinkMBB->addLiveIn(Reg);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
                                          MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // The lowering is pretty easy: we're just emitting the call to _alloca. The
  // non-trivial part is the impdef of ESP.
  // FIXME: The code should be tweaked as soon as we try to do codegen for
  // mingw-w64.

  BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
    .addExternalSymbol("_alloca")
    .addReg(X86::EAX, RegState::Implicit)
    .addReg(X86::ESP, RegState::Implicit)
    .addReg(X86::EAX, RegState::Define | RegState::Implicit)
    .addReg(X86::ESP, RegState::Define | RegState::Implicit)
    .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy.  We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call.  The return value will then
  // be in the normal return register.
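  // On x86-64 Darwin, for example, the sequence emitted below amounts to
  // roughly:
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)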
  const X86InstrInfo *TII
    = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *F = BB->getParent();

  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");

  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
  } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(TII->getGlobalBaseReg(F))
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::MINGW_ALLOCA:
    return EmitLoweredMingwAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_GR8:
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
    return EmitLoweredSelect(MI, BB);

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
    DebugLoc DL = MI->getDebugLoc();

    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI->eraseFromParent();   // The pseudo instruction is gone now.
    return BB;
  }
    // String/text processing lowering.
  case X86::PCMPISTRM128REG:
  case X86::VPCMPISTRM128REG:
    return EmitPCMP(MI, BB, 3, /*memArg=*/false);
  case X86::PCMPISTRM128MEM:
  case X86::VPCMPISTRM128MEM:
    return EmitPCMP(MI, BB, 3, /*memArg=*/true);
  case X86::PCMPESTRM128REG:
  case X86::VPCMPESTRM128REG:
    return EmitPCMP(MI, BB, 5, /*memArg=*/false);
  case X86::PCMPESTRM128MEM:
  case X86::VPCMPESTRM128MEM:
    return EmitPCMP(MI, BB, 5, /*memArg=*/true);

    // Atomic Lowering.
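    // Each ATOM* pseudo below expands to a cmpxchg retry loop (see the Emit*
    // helpers above); e.g. ATOMAND32 becomes, roughly:
    //   newMBB:
    //     mov  t1 = [addr]
    //     and  t2 = t1, val
    //     mov  EAX = t1
    //     lock cmpxchg [addr], t2
    //     jne  newMBB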
  case X86::ATOMAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
                                               X86::OR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMXOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
                                               X86::XOR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMNAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass, true);
  case X86::ATOMMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
  case X86::ATOMMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
  case X86::ATOMUMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
  case X86::ATOMUMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);

  case X86::ATOMAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
                                               X86::OR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMXOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
                                               X86::XOR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMNAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass, true);
  case X86::ATOMMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
  case X86::ATOMMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
  case X86::ATOMUMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
  case X86::ATOMUMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);

  case X86::ATOMAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
                                               X86::OR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMXOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
                                               X86::XOR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMNAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass, true);
  // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
  // This group is for the 64-bit host.
  case X86::ATOMAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
                                               X86::OR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMXOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
                                               X86::XOR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMNAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass, true);
  case X86::ATOMMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
  case X86::ATOMMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
  case X86::ATOMUMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
  case X86::ATOMUMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);

  // This group does 64-bit operations on a 32-bit host.
  case X86::ATOMAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               false);
  case X86::ATOMOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::OR32rr, X86::OR32rr,
                                               X86::OR32ri, X86::OR32ri,
                                               false);
  case X86::ATOMXOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::XOR32rr, X86::XOR32rr,
                                               X86::XOR32ri, X86::XOR32ri,
                                               false);
  case X86::ATOMNAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               true);
  case X86::ATOMADD6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::ADD32rr, X86::ADC32rr,
                                               X86::ADD32ri, X86::ADC32ri,
                                               false);
  case X86::ATOMSUB6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::SUB32rr, X86::SBB32rr,
                                               X86::SUB32ri, X86::SBB32ri,
                                               false);
  case X86::ATOMSWAP6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::MOV32rr, X86::MOV32rr,
                                               X86::MOV32ri, X86::MOV32ri,
                                               false);
  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
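  // The nodes handled below produce only 0/1 results (SETCC, or the boolean
  // second result of the arithmetic nodes), so every bit above bit 0 can be
  // reported as known zero.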
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         unsigned Depth) const {
  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       const GlobalValue* &GA,
                                       int64_t &Offset) const {
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  if (VT.getSizeInBits() != 128)
    return SDValue();

  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
    Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));

  return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
}

/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// to a simple store and scalar loads to extract the elements.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                                const TargetLowering &TLI) {
  SDValue InputVector = N->getOperand(0);

  // Only operate on vectors of 4 elements, where the alternative shuffling
  // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();

  // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
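  // When that holds, one 16-byte store plus four scalar loads is typically
  // cheaper than the shuffle/extract sequence this pattern would otherwise
  // produce.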
  SmallVector<SDNode *, 4> Uses;
  unsigned ExtractedElements = 0;
  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
       UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      return SDValue();

    SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();

    // Record which element was extracted.
    ExtractedElements |=
      1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();

    Uses.push_back(Extract);
  }

  // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();

  // Ok, we've now decided to do the transformation.
  DebugLoc dl = InputVector.getDebugLoc();

  // Store the value to a temporary stack slot.
  SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
                            MachinePointerInfo(), false, false, 0);

  // Replace each use (extract) with a load of the appropriate element.
  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
       UE = Uses.end(); UI != UE; ++UI) {
    SDNode *Extract = *UI;

    // Compute the element's address.
    SDValue Idx = Extract->getOperand(1);
    unsigned EltSize =
        InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
    uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
    SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());

    SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(),
                                     StackPtr, OffsetVal);

    // Load the scalar.
    SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
                                     ScalarAddr, MachinePointerInfo(),
                                     false, false, 0);

    // Replace the extract with the load.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget *Subtarget) {
  DebugLoc DL = N->getDebugLoc();
  SDValue Cond = N->getOperand(0);
  // Get the LHS/RHS of the select.
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  // If we have SSE[12] support, try to form min/max nodes.  SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  if (Subtarget->hasSSE2() &&
      (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
      Cond.getOpcode() == ISD::SETCC) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
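    // e.g. (x < y ? x : y) on f32/f64 becomes X86ISD::FMIN (minss/minsd) when
    // the NaN and signed-zero caveats handled below permit it.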
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!UnsafeFPMath &&
            !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!UnsafeFPMath &&
            (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }

  // If this is a select between two integer constants, try to do some
  // optimizations.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
      // Don't do this for crazy integer types.
      if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
        // If this is efficiently invertible, canonicalize the LHSC/RHSC values
        // so that TrueC (the true value) is larger than FalseC.
        bool NeedsCondInvert = false;

        if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
            // Efficiently invertible.
            (Cond.getOpcode() == ISD::SETCC ||  // setcc -> invertible.
             (Cond.getOpcode() == ISD::XOR &&   // xor(X, C) -> invertible.
              isa<ConstantSDNode>(Cond.getOperand(1))))) {
          NeedsCondInvert = true;
          std::swap(TrueC, FalseC);
        }

        // Optimize C ? 8 : 0 -> zext(C) << 3.  Likewise for any pow2/0.
        if (FalseC->getAPIntValue() == 0 &&
            TrueC->getAPIntValue().isPowerOf2()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);

          unsigned ShAmt = TrueC->getAPIntValue().logBase2();
          return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
                             DAG.getConstant(ShAmt, MVT::i8));
        }

        // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
        if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                             FalseC->getValueType(0), Cond);
          return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                             SDValue(FalseC, 0));
        }

        // Optimize cases that will turn into an LEA instruction.  This requires
        // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
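        // e.g. select C, 37, 32 becomes 32 + 5*zext(C), which instruction
        // selection can form with an LEA.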
        if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
          uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
          if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

          bool isFastMultiplier = false;
          if (Diff < 10) {
            switch ((unsigned char)Diff) {
            default: break;
            case 1:  // result = add base, cond
            case 2:  // result = lea base(    , cond*2)
            case 3:  // result = lea base(cond, cond*2)
            case 4:  // result = lea base(    , cond*4)
            case 5:  // result = lea base(cond, cond*4)
            case 8:  // result = lea base(    , cond*8)
            case 9:  // result = lea base(cond, cond*8)
              isFastMultiplier = true;
              break;
            }
          }

          if (isFastMultiplier) {
            APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
            if (NeedsCondInvert) // Invert the condition if needed.
              Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(1, Cond.getValueType()));

            // Zero extend the condition if needed.
            Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                               Cond);
            // Scale the condition by the difference.
            if (Diff != 1)
              Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(Diff, Cond.getValueType()));

            // Add the base if non-zero.
            if (FalseC->getAPIntValue() != 0)
              Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                                 SDValue(FalseC, 0));
            return Cond;
          }
        }
      }
  }

  return SDValue();
}

/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  DebugLoc DL = N->getDebugLoc();

  // If the flag operand isn't dead, don't touch this CMOV.
  if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
    return SDValue();

  // If this is a select between two integer constants, try to do some
  // optimizations.  Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);

      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        SDValue Cond = N->getOperand(3);
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, MVT::i8));
        if (N->getNumValues() == 2)  // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        SDValue Cond = N->getOperand(3);
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));

        if (N->getNumValues() == 2)  // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction.  This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

        bool isFastMultiplier = false;
        if (Diff < 10) {
          switch ((unsigned char)Diff) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
          SDValue Cond = N->getOperand(3);
          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                             DAG.getConstant(CC, MVT::i8), Cond);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          if (N->getNumValues() == 2)  // Dead flag value?
            return DCI.CombineTo(N, Cond, SDValue());
          return Cond;
        }
      }
    }
  }
  return SDValue();
}


/// PerformMulCombine - Optimize a single multiply with constant into two
/// in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
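/// For example, x*45 can be emitted as (x*9)*5 (two LEAs), and x*40 as
/// (x*5)<<3 (LEA + SHL).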
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
    DebugLoc DL = N->getDebugLoc();

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If second multiplier is pow2, issue it first. We want the multiply by
      // 3, 5, or 9 to be folded into the addressing mode unless the lone use
      // is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}

static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zero's or all ones.
  if (N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  return SDValue();
}

/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
/// when possible.
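/// For example, when every lane of a v4i32 value is shifted left by the same
/// amount, the shift can become a single PSLLD.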
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isVector() && VT.isInteger() &&
      N->getOpcode() == ISD::SHL)
    return PerformSHLCombine(N, DAG);

  // On X86 with SSE2 support, we can transform this to a vector shift if
  // all elements are shifted by the same amount.  We can't do this in legalize
  // because a constant vector is typically transformed to a constant pool
  // load, so we have no knowledge of the shift amount.
  if (!Subtarget->hasSSE2())
    return SDValue();

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
    return SDValue();

  SDValue ShAmtOp = N->getOperand(1);
  EVT EltVT = VT.getVectorElementType();
  DebugLoc DL = N->getDebugLoc();
  SDValue BaseShAmt = SDValue();
  if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned i = 0;
    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      BaseShAmt = Arg;
      break;
    }
    for (; i != NumElts; ++i) {
      SDValue Arg = ShAmtOp.getOperand(i);
      if (Arg.getOpcode() == ISD::UNDEF) continue;
      if (Arg != BaseShAmt) {
        return SDValue();
      }
    }
  } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
             cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) {
    SDValue InVec = ShAmtOp.getOperand(0);
    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
      unsigned NumElts = InVec.getValueType().getVectorNumElements();
      unsigned i = 0;
      for (; i != NumElts; ++i) {
        SDValue Arg = InVec.getOperand(i);
        if (Arg.getOpcode() == ISD::UNDEF) continue;
        BaseShAmt = Arg;
        break;
      }
    } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
        unsigned SplatIdx = cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex();
        if (C->getZExtValue() == SplatIdx)
          BaseShAmt = InVec.getOperand(1);
      }
    }
    if (BaseShAmt.getNode() == 0)
      BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
                              DAG.getIntPtrConstant(0));
  } else
    return SDValue();

  // The shift amount is an i32.
  if (EltVT.bitsGT(MVT::i32))
    BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
  else if (EltVT.bitsLT(MVT::i32))
    BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt);

  // The shift amount is identical so we can do a vector shift.
  SDValue ValOp = N->getOperand(0);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unknown shift opcode!");
    break;
  case ISD::SHL:
    if (VT == MVT::v2i64)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  case ISD::SRA:
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  case ISD::SRL:
    if (VT == MVT::v2i64)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v4i32)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
                         ValOp, BaseShAmt);
    if (VT == MVT::v8i16)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                         DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
                         ValOp, BaseShAmt);
    break;
  }
  return SDValue();
}

static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (bitwidth - c))) ==> (shld x, y, c)
  // where bitwidth is the size of VT in bits (16, 32, or 64).
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  DebugLoc DL = N->getDebugLoc();
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL,
                           VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
  bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps
    && Subtarget->hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load. We currently handle two cases: the load
    // is a direct child, or it's under an intervening TokenFactor. It would be
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    DebugLoc LdDL = Ld->getDebugLoc();
    DebugLoc StDL = N->getDebugLoc();
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      EVT LdVT = Subtarget->is64Bit() ?
MVT::i64 : MVT::f64; 10749 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 10750 Ld->getPointerInfo(), Ld->isVolatile(), 10751 Ld->isNonTemporal(), Ld->getAlignment()); 10752 SDValue NewChain = NewLd.getValue(1); 10753 if (TokenFactorIndex != -1) { 10754 Ops.push_back(NewChain); 10755 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 10756 Ops.size()); 10757 } 10758 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 10759 St->getPointerInfo(), 10760 St->isVolatile(), St->isNonTemporal(), 10761 St->getAlignment()); 10762 } 10763 10764 // Otherwise, lower to two pairs of 32-bit loads / stores. 10765 SDValue LoAddr = Ld->getBasePtr(); 10766 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 10767 DAG.getConstant(4, MVT::i32)); 10768 10769 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 10770 Ld->getPointerInfo(), 10771 Ld->isVolatile(), Ld->isNonTemporal(), 10772 Ld->getAlignment()); 10773 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 10774 Ld->getPointerInfo().getWithOffset(4), 10775 Ld->isVolatile(), Ld->isNonTemporal(), 10776 MinAlign(Ld->getAlignment(), 4)); 10777 10778 SDValue NewChain = LoLd.getValue(1); 10779 if (TokenFactorIndex != -1) { 10780 Ops.push_back(LoLd); 10781 Ops.push_back(HiLd); 10782 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 10783 Ops.size()); 10784 } 10785 10786 LoAddr = St->getBasePtr(); 10787 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 10788 DAG.getConstant(4, MVT::i32)); 10789 10790 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 10791 St->getPointerInfo(), 10792 St->isVolatile(), St->isNonTemporal(), 10793 St->getAlignment()); 10794 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 10795 St->getPointerInfo().getWithOffset(4), 10796 St->isVolatile(), 10797 St->isNonTemporal(), 10798 MinAlign(St->getAlignment(), 4)); 10799 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 10800 } 10801 return SDValue(); 10802} 10803 10804/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 10805/// X86ISD::FXOR nodes. 10806static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 10807 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 10808 // F[X]OR(0.0, x) -> x 10809 // F[X]OR(x, 0.0) -> x 10810 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 10811 if (C->getValueAPF().isPosZero()) 10812 return N->getOperand(1); 10813 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 10814 if (C->getValueAPF().isPosZero()) 10815 return N->getOperand(0); 10816 return SDValue(); 10817} 10818 10819/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 10820static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 10821 // FAND(0.0, x) -> 0.0 10822 // FAND(x, 0.0) -> 0.0 10823 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 10824 if (C->getValueAPF().isPosZero()) 10825 return N->getOperand(0); 10826 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 10827 if (C->getValueAPF().isPosZero()) 10828 return N->getOperand(1); 10829 return SDValue(); 10830} 10831 10832static SDValue PerformBTCombine(SDNode *N, 10833 SelectionDAG &DAG, 10834 TargetLowering::DAGCombinerInfo &DCI) { 10835 // BT ignores high bits in the bit index operand. 
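  // For example, (bt %x, (and %idx, 31)) can drop the 'and' when %idx is
  // 32 bits wide: only the low log2(width) bits of the index are demanded,
  // which is exactly the mask handed to SimplifyDemandedBits below.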
10836 SDValue Op1 = N->getOperand(1); 10837 if (Op1.hasOneUse()) { 10838 unsigned BitWidth = Op1.getValueSizeInBits(); 10839 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 10840 APInt KnownZero, KnownOne; 10841 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 10842 !DCI.isBeforeLegalizeOps()); 10843 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10844 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 10845 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 10846 DCI.CommitTargetLoweringOpt(TLO); 10847 } 10848 return SDValue(); 10849} 10850 10851static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 10852 SDValue Op = N->getOperand(0); 10853 if (Op.getOpcode() == ISD::BIT_CONVERT) 10854 Op = Op.getOperand(0); 10855 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 10856 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 10857 VT.getVectorElementType().getSizeInBits() == 10858 OpVT.getVectorElementType().getSizeInBits()) { 10859 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op); 10860 } 10861 return SDValue(); 10862} 10863 10864static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) { 10865 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 10866 // (and (i32 x86isd::setcc_carry), 1) 10867 // This eliminates the zext. This transformation is necessary because 10868 // ISD::SETCC is always legalized to i8. 10869 DebugLoc dl = N->getDebugLoc(); 10870 SDValue N0 = N->getOperand(0); 10871 EVT VT = N->getValueType(0); 10872 if (N0.getOpcode() == ISD::AND && 10873 N0.hasOneUse() && 10874 N0.getOperand(0).hasOneUse()) { 10875 SDValue N00 = N0.getOperand(0); 10876 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 10877 return SDValue(); 10878 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 10879 if (!C || C->getZExtValue() != 1) 10880 return SDValue(); 10881 return DAG.getNode(ISD::AND, dl, VT, 10882 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 10883 N00.getOperand(0), N00.getOperand(1)), 10884 DAG.getConstant(1, VT)); 10885 } 10886 10887 return SDValue(); 10888} 10889 10890SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 10891 DAGCombinerInfo &DCI) const { 10892 SelectionDAG &DAG = DCI.DAG; 10893 switch (N->getOpcode()) { 10894 default: break; 10895 case ISD::EXTRACT_VECTOR_ELT: 10896 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this); 10897 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 10898 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI); 10899 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 10900 case ISD::SHL: 10901 case ISD::SRA: 10902 case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget); 10903 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 10904 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 10905 case X86ISD::FXOR: 10906 case X86ISD::FOR: return PerformFORCombine(N, DAG); 10907 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 10908 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 10909 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 10910 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG); 10911 case X86ISD::SHUFPS: // Handle all target specific shuffles 10912 case X86ISD::SHUFPD: 10913 case X86ISD::PALIGN: 10914 case X86ISD::PUNPCKHBW: 10915 case X86ISD::PUNPCKHWD: 10916 case X86ISD::PUNPCKHDQ: 10917 case X86ISD::PUNPCKHQDQ: 10918 case X86ISD::UNPCKHPS: 10919 case X86ISD::UNPCKHPD: 10920 case X86ISD::PUNPCKLBW: 10921 case X86ISD::PUNPCKLWD: 10922 case 
X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then
    // it might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than have it
        // promoted as an operand) is when its only use is a liveout.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
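    // A hand-written illustration (hypothetical IR, not from a test): for
    //   store (add (load p), 1), p
    // promoting the i16 add to i32 would prevent folding the load and store
    // into a single read-modify-write 'addw $1, (p)', so we refuse below.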
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

static bool LowerToBSwap(CallInst *CI) {
  // FIXME: this should verify that we are targeting a 486 or better. If not,
  // we will turn this bswap into something that will be lowered to logical ops
  // instead of emitting the bswap asm. For now, we don't support 486 or lower
  // so don't worry about this.

  // Verify this is a simple bswap.
  if (CI->getNumArgOperands() != 1 ||
      CI->getType() != CI->getArgOperand(0)->getType() ||
      !CI->getType()->isIntegerTy())
    return false;

  const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // Okay, we can do this xform, do so now.
  const Type *Tys[] = { Ty };
  Module *M = CI->getParent()->getParent()->getParent();
  Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);

  Value *Op = CI->getArgOperand(0);
  Op = CallInst::Create(Int, Op, CI->getName(), CI);

  CI->replaceAllUsesWith(Op);
  CI->eraseFromParent();
  return true;
}

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();

  std::string AsmStr = IA->getAsmString();

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, "\n");  // ; as separator?

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t");  // Split with whitespace.

    // bswap $0
    if (AsmPieces.size() == 2 &&
        (AsmPieces[0] == "bswap" ||
         AsmPieces[0] == "bswapq" ||
         AsmPieces[0] == "bswapl") &&
        (AsmPieces[1] == "$0" ||
         AsmPieces[1] == "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
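      // E.g. (hypothetical user code, for illustration):
      //   asm("bswap $0" : "=r"(x) : "0"(x));
      // becomes a call to llvm.bswap.i32, which lowers to the same single
      // instruction but is visible to the optimizer.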
      return LowerToBSwap(CI);
    }
    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        AsmPieces.size() == 3 &&
        (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") &&
        AsmPieces[1] == "$$8," &&
        AsmPieces[2] == "${0:w}" &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
      AsmPieces.clear();
      const std::string &Constraints = IA->getConstraintString();
      SplitString(StringRef(Constraints).substr(5), AsmPieces, ",");
      std::sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}") {
        return LowerToBSwap(CI);
      }
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(64) &&
        Constraints.size() >= 2 &&
        Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
        Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
      // bswap %eax / bswap %edx / xchgl %eax, %edx  ->  llvm.bswap.i64
      SmallVector<StringRef, 4> Words;
      SplitString(AsmPieces[0], Words, " \t");
      if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
        Words.clear();
        SplitString(AsmPieces[1], Words, " \t");
        if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
          Words.clear();
          SplitString(AsmPieces[2], Words, " \t,");
          if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
              Words[2] == "%edx") {
            return LowerToBSwap(CI);
          }
        }
      }
    }
    break;
  }
  return false;
}



/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
      return C_Register;
    case 'f':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'y':
    case 'Y':
      return C_RegisterClass;
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value,
/// where -1 means no match and the weight otherwise ranges from 0 (so-so
/// match) to 3 (good match).
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
int X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  int weight = -1;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return 0;
  // Look at the constraint type.
  switch (*constraint) {
  default:
    return TargetLowering::getSingleConstraintMatchWeight(info, constraint);
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = 3;
    }
    break;
  // etc.
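  // Additional constraint letters ('J', 'K', 'N', ...) could be weighted the
  // same way; only 'I' is scored explicitly so far.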
11197 } 11198 return weight; 11199} 11200 11201/// LowerXConstraint - try to replace an X constraint, which matches anything, 11202/// with another that has more specific requirements based on the type of the 11203/// corresponding operand. 11204const char *X86TargetLowering:: 11205LowerXConstraint(EVT ConstraintVT) const { 11206 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 11207 // 'f' like normal targets. 11208 if (ConstraintVT.isFloatingPoint()) { 11209 if (Subtarget->hasSSE2()) 11210 return "Y"; 11211 if (Subtarget->hasSSE1()) 11212 return "x"; 11213 } 11214 11215 return TargetLowering::LowerXConstraint(ConstraintVT); 11216} 11217 11218/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11219/// vector. If it is invalid, don't add anything to Ops. 11220void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11221 char Constraint, 11222 std::vector<SDValue>&Ops, 11223 SelectionDAG &DAG) const { 11224 SDValue Result(0, 0); 11225 11226 switch (Constraint) { 11227 default: break; 11228 case 'I': 11229 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11230 if (C->getZExtValue() <= 31) { 11231 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 11232 break; 11233 } 11234 } 11235 return; 11236 case 'J': 11237 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11238 if (C->getZExtValue() <= 63) { 11239 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 11240 break; 11241 } 11242 } 11243 return; 11244 case 'K': 11245 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11246 if ((int8_t)C->getSExtValue() == C->getSExtValue()) { 11247 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 11248 break; 11249 } 11250 } 11251 return; 11252 case 'N': 11253 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11254 if (C->getZExtValue() <= 255) { 11255 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 11256 break; 11257 } 11258 } 11259 return; 11260 case 'e': { 11261 // 32-bit signed value 11262 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11263 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 11264 C->getSExtValue())) { 11265 // Widen to 64 bits here to get it sign extended. 11266 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 11267 break; 11268 } 11269 // FIXME gcc accepts some relocatable values here too, but only in certain 11270 // memory models; it's complicated. 11271 } 11272 return; 11273 } 11274 case 'Z': { 11275 // 32-bit unsigned value 11276 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 11277 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 11278 C->getZExtValue())) { 11279 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 11280 break; 11281 } 11282 } 11283 // FIXME gcc accepts some relocatable values here too, but only in certain 11284 // memory models; it's complicated. 11285 return; 11286 } 11287 case 'i': { 11288 // Literal immediates are always ok. 11289 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 11290 // Widen to 64 bits here to get it sign extended. 11291 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 11292 break; 11293 } 11294 11295 // In any sort of PIC mode addresses need to be computed at runtime by 11296 // adding in a register or some sort of table lookup. These can't 11297 // be used as immediates. 
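    // For instance, under GOT-style PIC the address of a global is
    // materialized with something like 'movl foo@GOT(%ebx), %eax', i.e. a
    // load rather than a link-time constant, so it can never satisfy 'i'.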
11298 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 11299 return; 11300 11301 // If we are in non-pic codegen mode, we allow the address of a global (with 11302 // an optional displacement) to be used with 'i'. 11303 GlobalAddressSDNode *GA = 0; 11304 int64_t Offset = 0; 11305 11306 // Match either (GA), (GA+C), (GA+C1+C2), etc. 11307 while (1) { 11308 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { 11309 Offset += GA->getOffset(); 11310 break; 11311 } else if (Op.getOpcode() == ISD::ADD) { 11312 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 11313 Offset += C->getZExtValue(); 11314 Op = Op.getOperand(0); 11315 continue; 11316 } 11317 } else if (Op.getOpcode() == ISD::SUB) { 11318 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 11319 Offset += -C->getZExtValue(); 11320 Op = Op.getOperand(0); 11321 continue; 11322 } 11323 } 11324 11325 // Otherwise, this isn't something we can handle, reject it. 11326 return; 11327 } 11328 11329 const GlobalValue *GV = GA->getGlobal(); 11330 // If we require an extra load to get this address, as in PIC mode, we 11331 // can't accept it. 11332 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV, 11333 getTargetMachine()))) 11334 return; 11335 11336 Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), 11337 GA->getValueType(0), Offset); 11338 break; 11339 } 11340 } 11341 11342 if (Result.getNode()) { 11343 Ops.push_back(Result); 11344 return; 11345 } 11346 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 11347} 11348 11349std::vector<unsigned> X86TargetLowering:: 11350getRegClassForInlineAsmConstraint(const std::string &Constraint, 11351 EVT VT) const { 11352 if (Constraint.size() == 1) { 11353 // FIXME: not handling fp-stack yet! 11354 switch (Constraint[0]) { // GCC X86 Constraint Letters 11355 default: break; // Unknown constraint letter 11356 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. 
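      // In 64-bit mode any GR8/GR16/GR32/GR64 register (including the
      // REX-prefixed ones) qualifies; in 32-bit mode we fall through to the
      // 'Q' handling below, which is limited to the a/b/c/d families.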
11357 if (Subtarget->is64Bit()) { 11358 if (VT == MVT::i32) 11359 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 11360 X86::ESI, X86::EDI, X86::R8D, X86::R9D, 11361 X86::R10D,X86::R11D,X86::R12D, 11362 X86::R13D,X86::R14D,X86::R15D, 11363 X86::EBP, X86::ESP, 0); 11364 else if (VT == MVT::i16) 11365 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 11366 X86::SI, X86::DI, X86::R8W,X86::R9W, 11367 X86::R10W,X86::R11W,X86::R12W, 11368 X86::R13W,X86::R14W,X86::R15W, 11369 X86::BP, X86::SP, 0); 11370 else if (VT == MVT::i8) 11371 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 11372 X86::SIL, X86::DIL, X86::R8B,X86::R9B, 11373 X86::R10B,X86::R11B,X86::R12B, 11374 X86::R13B,X86::R14B,X86::R15B, 11375 X86::BPL, X86::SPL, 0); 11376 11377 else if (VT == MVT::i64) 11378 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 11379 X86::RSI, X86::RDI, X86::R8, X86::R9, 11380 X86::R10, X86::R11, X86::R12, 11381 X86::R13, X86::R14, X86::R15, 11382 X86::RBP, X86::RSP, 0); 11383 11384 break; 11385 } 11386 // 32-bit fallthrough 11387 case 'Q': // Q_REGS 11388 if (VT == MVT::i32) 11389 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0); 11390 else if (VT == MVT::i16) 11391 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0); 11392 else if (VT == MVT::i8) 11393 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0); 11394 else if (VT == MVT::i64) 11395 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0); 11396 break; 11397 } 11398 } 11399 11400 return std::vector<unsigned>(); 11401} 11402 11403std::pair<unsigned, const TargetRegisterClass*> 11404X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 11405 EVT VT) const { 11406 // First, see if this is a constraint that directly corresponds to an LLVM 11407 // register class. 11408 if (Constraint.size() == 1) { 11409 // GCC Constraint Letters 11410 switch (Constraint[0]) { 11411 default: break; 11412 case 'r': // GENERAL_REGS 11413 case 'l': // INDEX_REGS 11414 if (VT == MVT::i8) 11415 return std::make_pair(0U, X86::GR8RegisterClass); 11416 if (VT == MVT::i16) 11417 return std::make_pair(0U, X86::GR16RegisterClass); 11418 if (VT == MVT::i32 || !Subtarget->is64Bit()) 11419 return std::make_pair(0U, X86::GR32RegisterClass); 11420 return std::make_pair(0U, X86::GR64RegisterClass); 11421 case 'R': // LEGACY_REGS 11422 if (VT == MVT::i8) 11423 return std::make_pair(0U, X86::GR8_NOREXRegisterClass); 11424 if (VT == MVT::i16) 11425 return std::make_pair(0U, X86::GR16_NOREXRegisterClass); 11426 if (VT == MVT::i32 || !Subtarget->is64Bit()) 11427 return std::make_pair(0U, X86::GR32_NOREXRegisterClass); 11428 return std::make_pair(0U, X86::GR64_NOREXRegisterClass); 11429 case 'f': // FP Stack registers. 11430 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 11431 // value to the correct fpstack register class. 11432 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 11433 return std::make_pair(0U, X86::RFP32RegisterClass); 11434 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 11435 return std::make_pair(0U, X86::RFP64RegisterClass); 11436 return std::make_pair(0U, X86::RFP80RegisterClass); 11437 case 'y': // MMX_REGS if MMX allowed. 11438 if (!Subtarget->hasMMX()) break; 11439 return std::make_pair(0U, X86::VR64RegisterClass); 11440 case 'Y': // SSE_REGS if SSE2 allowed 11441 if (!Subtarget->hasSSE2()) break; 11442 // FALL THROUGH. 
11443 case 'x': // SSE_REGS if SSE1 allowed 11444 if (!Subtarget->hasSSE1()) break; 11445 11446 switch (VT.getSimpleVT().SimpleTy) { 11447 default: break; 11448 // Scalar SSE types. 11449 case MVT::f32: 11450 case MVT::i32: 11451 return std::make_pair(0U, X86::FR32RegisterClass); 11452 case MVT::f64: 11453 case MVT::i64: 11454 return std::make_pair(0U, X86::FR64RegisterClass); 11455 // Vector types. 11456 case MVT::v16i8: 11457 case MVT::v8i16: 11458 case MVT::v4i32: 11459 case MVT::v2i64: 11460 case MVT::v4f32: 11461 case MVT::v2f64: 11462 return std::make_pair(0U, X86::VR128RegisterClass); 11463 } 11464 break; 11465 } 11466 } 11467 11468 // Use the default implementation in TargetLowering to convert the register 11469 // constraint into a member of a register class. 11470 std::pair<unsigned, const TargetRegisterClass*> Res; 11471 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 11472 11473 // Not found as a standard register? 11474 if (Res.second == 0) { 11475 // Map st(0) -> st(7) -> ST0 11476 if (Constraint.size() == 7 && Constraint[0] == '{' && 11477 tolower(Constraint[1]) == 's' && 11478 tolower(Constraint[2]) == 't' && 11479 Constraint[3] == '(' && 11480 (Constraint[4] >= '0' && Constraint[4] <= '7') && 11481 Constraint[5] == ')' && 11482 Constraint[6] == '}') { 11483 11484 Res.first = X86::ST0+Constraint[4]-'0'; 11485 Res.second = X86::RFP80RegisterClass; 11486 return Res; 11487 } 11488 11489 // GCC allows "st(0)" to be called just plain "st". 11490 if (StringRef("{st}").equals_lower(Constraint)) { 11491 Res.first = X86::ST0; 11492 Res.second = X86::RFP80RegisterClass; 11493 return Res; 11494 } 11495 11496 // flags -> EFLAGS 11497 if (StringRef("{flags}").equals_lower(Constraint)) { 11498 Res.first = X86::EFLAGS; 11499 Res.second = X86::CCRRegisterClass; 11500 return Res; 11501 } 11502 11503 // 'A' means EAX + EDX. 11504 if (Constraint == "A") { 11505 Res.first = X86::EAX; 11506 Res.second = X86::GR32_ADRegisterClass; 11507 return Res; 11508 } 11509 return Res; 11510 } 11511 11512 // Otherwise, check to see if this is a register class of the wrong value 11513 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 11514 // turn into {ax},{dx}. 11515 if (Res.second->hasType(VT)) 11516 return Res; // Correct type already, nothing to do. 11517 11518 // All of the single-register GCC register classes map their values onto 11519 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 11520 // really want an 8-bit or 32-bit register, map to the appropriate register 11521 // class and return the appropriate register. 
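  // Hand-worked example: the constraint "{ax}" with an i32 operand initially
  // maps to AX in GR16; the fix-ups below rewrite that to EAX in GR32 so the
  // asm operand gets the register width the user actually asked for.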
11522 if (Res.second == X86::GR16RegisterClass) { 11523 if (VT == MVT::i8) { 11524 unsigned DestReg = 0; 11525 switch (Res.first) { 11526 default: break; 11527 case X86::AX: DestReg = X86::AL; break; 11528 case X86::DX: DestReg = X86::DL; break; 11529 case X86::CX: DestReg = X86::CL; break; 11530 case X86::BX: DestReg = X86::BL; break; 11531 } 11532 if (DestReg) { 11533 Res.first = DestReg; 11534 Res.second = X86::GR8RegisterClass; 11535 } 11536 } else if (VT == MVT::i32) { 11537 unsigned DestReg = 0; 11538 switch (Res.first) { 11539 default: break; 11540 case X86::AX: DestReg = X86::EAX; break; 11541 case X86::DX: DestReg = X86::EDX; break; 11542 case X86::CX: DestReg = X86::ECX; break; 11543 case X86::BX: DestReg = X86::EBX; break; 11544 case X86::SI: DestReg = X86::ESI; break; 11545 case X86::DI: DestReg = X86::EDI; break; 11546 case X86::BP: DestReg = X86::EBP; break; 11547 case X86::SP: DestReg = X86::ESP; break; 11548 } 11549 if (DestReg) { 11550 Res.first = DestReg; 11551 Res.second = X86::GR32RegisterClass; 11552 } 11553 } else if (VT == MVT::i64) { 11554 unsigned DestReg = 0; 11555 switch (Res.first) { 11556 default: break; 11557 case X86::AX: DestReg = X86::RAX; break; 11558 case X86::DX: DestReg = X86::RDX; break; 11559 case X86::CX: DestReg = X86::RCX; break; 11560 case X86::BX: DestReg = X86::RBX; break; 11561 case X86::SI: DestReg = X86::RSI; break; 11562 case X86::DI: DestReg = X86::RDI; break; 11563 case X86::BP: DestReg = X86::RBP; break; 11564 case X86::SP: DestReg = X86::RSP; break; 11565 } 11566 if (DestReg) { 11567 Res.first = DestReg; 11568 Res.second = X86::GR64RegisterClass; 11569 } 11570 } 11571 } else if (Res.second == X86::FR32RegisterClass || 11572 Res.second == X86::FR64RegisterClass || 11573 Res.second == X86::VR128RegisterClass) { 11574 // Handle references to XMM physical registers that got mapped into the 11575 // wrong class. This can happen with constraints like {xmm0} where the 11576 // target independent register mapper will just pick the first match it can 11577 // find, ignoring the required type. 11578 if (VT == MVT::f32) 11579 Res.second = X86::FR32RegisterClass; 11580 else if (VT == MVT::f64) 11581 Res.second = X86::FR64RegisterClass; 11582 else if (X86::VR128RegisterClass->hasType(VT)) 11583 Res.second = X86::VR128RegisterClass; 11584 } 11585 11586 return Res; 11587} 11588