X86ISelLowering.cpp revision 6f836adafee88669273e9302e3344c4b9cef8a0d
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
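  // (A truncating store such as an i64->i32 truncstore is therefore
  // legalized into an explicit TRUNCATE of the value followed by an
  // ordinary i32 store.)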

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU, MVT::i8,  Expand);
  setOperationAction(ISD::SDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::UDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::SREM,  MVT::i8,  Expand);
  setOperationAction(ISD::UREM,  MVT::i8,  Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV,  MVT::i16, Expand);
  setOperationAction(ISD::UDIV,  MVT::i16, Expand);
  setOperationAction(ISD::SREM,  MVT::i16, Expand);
  setOperationAction(ISD::UREM,  MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV,  MVT::i64, Expand);
  setOperationAction(ISD::UDIV,  MVT::i64, Expand);
  setOperationAction(ISD::SREM,  MVT::i64, Expand);
  setOperationAction(ISD::UREM,  MVT::i64, Expand);
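  // (For example, the legalizer rewrites both of
  //    %q = sdiv i32 %x, %y
  //    %r = srem i32 %x, %y
  // into SDIVREM nodes, and CSE then merges them into one node whose two
  // results correspond to the EAX/EDX outputs of a single idiv.)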

  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
  setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,  Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8,  Expand);
  setOperationAction(ISD::CTTZ,  MVT::i8,  Custom);
  setOperationAction(ISD::CTLZ,  MVT::i8,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i16, Custom);
  setOperationAction(ISD::CTLZ,  MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTLZ,  MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ,  MVT::i64, Custom);
    setOperationAction(ISD::CTLZ,  MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC,  MVT::i8,  Custom);
  setOperationAction(ISD::SETCC,  MVT::i16, Custom);
  setOperationAction(ISD::SETCC,  MVT::i32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f64, Custom);
  setOperationAction(ISD::SETCC,  MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC,  MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool,    MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,       MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress,   MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol,  MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool,   MVT::i64, Custom);
    setOperationAction(ISD::JumpTable,      MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress,  MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LCS, MVT::i8,  Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i64, Custom);
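  // (ATOMIC_LCS is the atomic compare-and-swap node; the custom lowering
  // maps it onto the LOCK-prefixed CMPXCHG instruction of matching width.)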

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG,   MVT::Other, Expand);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
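  // (On Cygwin/MinGW a dynamic alloca has to probe the stack a page at a
  // time, hence the custom lowering there; the default expansion just
  // adjusts the stack pointer.)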

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
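  // (addLegalFPImmediate records constants the selector may materialize
  // without a constant-pool load: xorps/xorpd zeros for SSE, and FLD0/FLD1,
  // optionally negated with FCHS, for x87.)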

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
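  // (Expanding FPOW turns each of these into a call to the corresponding
  // libm routine: powf, pow, or powl.)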

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }
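  // (The Promote/AddPromotedToType pairs above funnel every 64-bit MMX
  // bitwise op and load through the v1i64 patterns via bit_convert, so only
  // one set of selection patterns is needed for those operations.)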

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,  MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,  MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,  MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,  MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,  MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,  MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,  MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,  MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,  MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
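    // (v2i64 element insert/extract is only handled in 64-bit mode, where a
    // single GPR can hold an entire element; in 32-bit mode the default
    // expansion through memory is used instead.)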

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}


MVT::ValueType
X86TargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i8;
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}
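// (For example, a struct containing a v4f32 member gets a 16-byte boundary
// from the walk above, while a struct of two i32s keeps the default 4-byte
// alignment.)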

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  SmallVector<SDOperand, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDOperand ValToCopy = Op.getOperand(i*2+1);

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (RVLocs[i].getLocReg() == X86::ST0 ||
        RVLocs[i].getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.Val)
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size());
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes
/// that Chain/InFlag are the input chain/flag to use, and that TheCall is
/// the call being lowered.  This returns an SDNode with the same number of
/// values as the ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    MVT::ValueType CopyVT = RVLocs[i].getValVT();

    // If this is a call to a function that returns an fp value on the floating
    // point stack, but where we prefer to use the value in xmm registers, copy
    // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if (RVLocs[i].getLocReg() == X86::ST0 &&
        isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
      CopyVT = MVT::f80;
    }

    Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDOperand Val = Chain.getValue(0);
    InFlag = Chain.getValue(2);

    if (CopyVT != RVLocs[i].getValVT()) {
      // Round the F80 to the right size, which also moves it to the
      // appropriate xmm register.
      Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));
    }

    ResultVals.push_back(Val);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall calling convention seems to be standard for many Windows' API
//  routines and around. It differs from the C calling convention just a
//  little: the callee should clean up the stack, not the caller. Symbols
//  should also be decorated in some fancy way :) It doesn't support any
//  vector arguments.
//  For info on the fast calling convention see Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value.  It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(6))->getArgFlags().isSRet();
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses
/// struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
}
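// (Both predicates rely on the fixed operand layout of these nodes: a CALL's
// argument list starts at operand 5 as (value, flags) pairs, so operand 6
// holds the first argument's flags; FORMAL_ARGUMENTS flags start at
// operand 3.)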

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else {
      if (CC == CallingConv::Fast && PerformTailCallOpt)
        return CC_X86_64_TailCall;
      else
        return CC_X86_64_C;
    }
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call. Currently the implementation of this call is very conservative and
/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
/// virtual registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
                                                    MachineFrameInfo *MFI) {
  RegisterSDNode *OpReg = NULL;
  FrameIndexSDNode *FrameIdxNode = NULL;
  int FrameIdx = 0;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       (FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op.getOperand(1))) &&
       (MFI->isFixedObjectIndex((FrameIdx = FrameIdxNode->getIndex()))) &&
       (MFI->getObjectOffset(FrameIdx) >= 0)))
    return true;
  return false;
}

/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}


/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CopyTailCallClobberedArgumentsToVRegs - Create virtual registers for all
/// arguments to force loading and guarantee that arguments sourcing from
/// incoming parameters do not overwrite each other.
static SDOperand
CopyTailCallClobberedArgumentsToVRegs(SDOperand Chain,
    SmallVector<std::pair<unsigned, SDOperand>, 8> &TailCallClobberedVRegs,
    SelectionDAG &DAG,
    MachineFunction &MF,
    const TargetLowering *TL) {

  SDOperand InFlag;
  for (unsigned i = 0, e = TailCallClobberedVRegs.size(); i != e; i++) {
    SDOperand Arg = TailCallClobberedVRegs[i].second;
    unsigned Idx = TailCallClobberedVRegs[i].first;
    unsigned VReg =
      MF.getRegInfo().
        createVirtualRegister(TL->getRegClassFor(Arg.getValueType()));
    Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
    InFlag = Chain.getValue(1);
    Arg = DAG.getCopyFromReg(Chain, VReg, Arg.getValueType(), InFlag);
    TailCallClobberedVRegs[i] = std::make_pair(Idx, Arg);
    Chain = Arg.getValue(1);
    InFlag = Arg.getValue(2);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
  SDOperand AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
  SDOperand SizeNode  = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}
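// (AlwaysInline forces the memcpy to expand into loads and stores: a byval
// copy happens while the outgoing argument area is being set up, where a
// memcpy libcall could clobber the very arguments being passed.)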

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis.
  // In case of tail call optimization mark all arguments mutable, since they
  // could be overwritten by the lowering of arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (Flags.isByVal())
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
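      // (For example, an i8 argument arrives in the low bits of a 32-bit
      // register; AssertZext records that the upper bits are already zero
      // before the TRUNCATE narrows the value.)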

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // align stack specially for tail calls
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }
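  // (The register save area built above is laid out as 6 x 8 bytes of GPR
  // slots followed by 8 x 16 bytes of XMM slots; va_arg walks it using the
  // gp/fp offsets recorded above.)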

  // Make sure the instruction takes 8n+4 bytes, to ensure that the start of
  // the arguments and the arguments after the retaddr has been pushed are
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn  = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;  // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA;  // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->getArgFlags();
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
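  // Non-byval arguments are stored directly into the outgoing argument area.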
1390 unsigned NumBytesCallerPushed = 1391 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1392 FPDiff = NumBytesCallerPushed - NumBytes; 1393 1394 // Set the delta of movement of the returnaddr stackslot. 1395 // But only set if delta is greater than previous delta. 1396 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1397 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1398 } 1399 1400 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); 1401 1402 SDOperand RetAddrFrIdx; 1403 if (IsTailCall) { 1404 // Adjust the Return address stack slot. 1405 if (FPDiff) { 1406 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1407 RetAddrFrIdx = getReturnAddressFrameIndex(DAG); 1408 // Load the "old" Return address. 1409 RetAddrFrIdx = 1410 DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0); 1411 Chain = SDOperand(RetAddrFrIdx.Val, 1); 1412 } 1413 } 1414 1415 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1416 SmallVector<std::pair<unsigned, SDOperand>, 8> TailCallClobberedVRegs; 1417 SmallVector<SDOperand, 8> MemOpChains; 1418 1419 SDOperand StackPtr; 1420 1421 // Walk the register/memloc assignments, inserting copies/loads. For tail 1422 // calls, remember all arguments for later special lowering. 1423 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1424 CCValAssign &VA = ArgLocs[i]; 1425 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1426 1427 // Promote the value if needed. 1428 switch (VA.getLocInfo()) { 1429 default: assert(0 && "Unknown loc info!"); 1430 case CCValAssign::Full: break; 1431 case CCValAssign::SExt: 1432 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1433 break; 1434 case CCValAssign::ZExt: 1435 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1436 break; 1437 case CCValAssign::AExt: 1438 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1439 break; 1440 } 1441 1442 if (VA.isRegLoc()) { 1443 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1444 } else { 1445 if (!IsTailCall) { 1446 assert(VA.isMemLoc()); 1447 if (StackPtr.Val == 0) 1448 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1449 1450 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1451 Arg)); 1452 } else if (IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) { 1453 TailCallClobberedVRegs.push_back(std::make_pair(i,Arg)); 1454 } 1455 } 1456 } 1457 1458 if (!MemOpChains.empty()) 1459 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1460 &MemOpChains[0], MemOpChains.size()); 1461 1462 // Build a sequence of copy-to-reg nodes chained together with token chain 1463 // and flag operands which copy the outgoing args into registers. 1464 SDOperand InFlag; 1465 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1466 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1467 InFlag); 1468 InFlag = Chain.getValue(1); 1469 } 1470 1471 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1472 // GOT pointer. 1473 if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) { 1474 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1475 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1476 InFlag); 1477 InFlag = Chain.getValue(1); 1478 } 1479 // If we are tail calling and generating PIC/GOT style code load the address 1480 // of the callee into ecx. The value in ecx is used as target of the tail 1481 // jump. This is done to circumvent the ebx/callee-saved problem for tail 1482 // calls on PIC/GOT architectures. 
  // Normally we would just put the address of GOT into ebx and then call
  // target@PLT. But for tail calls ebx would be restored (since ebx is
  // callee saved) before jumping to the target@PLT.
  if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
    // Note: The actual moving to ecx is done further down.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (G && !G->getGlobal()->hasHiddenVisibility() &&
        !G->getGlobal()->hasProtectedVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);
  }

  if (Is64Bit && isVarArg) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...)
    // in the declaration) %al is used as a hidden argument to specify the
    // number of SSE registers used. The contents of %al do not need to match
    // exactly the number of registers, but must be an upper bound on the
    // number of SSE registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const unsigned XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }


  // For tail calls lower the arguments to the 'real' stack slot.
  if (IsTailCall) {
    SmallVector<SDOperand, 8> MemOpChains2;
    SDOperand FIN;
    int FI = 0;
    // Do not flag preceding copytoreg stuff together with the following
    // stuff.
    InFlag = SDOperand();

    Chain = CopyTailCallClobberedArgumentsToVRegs(Chain,
                                                  TailCallClobberedVRegs,
                                                  DAG, MF, this);

    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (!VA.isRegLoc()) {
        assert(VA.isMemLoc());
        SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
        SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
        ISD::ArgFlagsTy Flags =
          cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags();
        // Create frame index.
        int32_t Offset = VA.getLocMemOffset()+FPDiff;
        uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
        FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
        FIN = DAG.getFrameIndex(FI, MVT::i32);

        // Find the virtual register for this argument.
        bool Found = false;
        for (unsigned idx=0, e= TailCallClobberedVRegs.size(); idx < e; idx++)
          if (TailCallClobberedVRegs[idx].first==i) {
            Arg = TailCallClobberedVRegs[idx].second;
            Found = true;
            break;
          }
        assert(IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)==false ||
               (Found==true && "No corresponding Argument was found"));

        if (Flags.isByVal()) {
          // Copy relative to framepointer.
          MemOpChains2.push_back(CreateCopyOfByValArgument(Arg, FIN, Chain,
                                                           Flags, DAG));
        } else {
          // Store relative to framepointer.
1559 MemOpChains2.push_back( 1560 DAG.getStore(Chain, Arg, FIN, 1561 PseudoSourceValue::getFixedStack(), FI)); 1562 } 1563 } 1564 } 1565 1566 if (!MemOpChains2.empty()) 1567 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1568 &MemOpChains2[0], MemOpChains2.size()); 1569 1570 // Store the return address to the appropriate stack slot. 1571 if (FPDiff) { 1572 // Calculate the new stack slot for the return address. 1573 int SlotSize = Is64Bit ? 8 : 4; 1574 int NewReturnAddrFI = 1575 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1576 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1577 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1578 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, 1579 PseudoSourceValue::getFixedStack(), NewReturnAddrFI); 1580 } 1581 } 1582 1583 // If the callee is a GlobalAddress node (quite common, every direct call is) 1584 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1585 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1586 // We should use extra load for direct calls to dllimported functions in 1587 // non-JIT mode. 1588 if ((IsTailCall || !Is64Bit || 1589 getTargetMachine().getCodeModel() != CodeModel::Large) 1590 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1591 getTargetMachine(), true)) 1592 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1593 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1594 if (IsTailCall || !Is64Bit || 1595 getTargetMachine().getCodeModel() != CodeModel::Large) 1596 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1597 } else if (IsTailCall) { 1598 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1599 1600 Chain = DAG.getCopyToReg(Chain, 1601 DAG.getRegister(Opc, getPointerTy()), 1602 Callee,InFlag); 1603 Callee = DAG.getRegister(Opc, getPointerTy()); 1604 // Add register as live out. 1605 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1606 } 1607 1608 // Returns a chain & a flag for retval copy to use. 1609 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1610 SmallVector<SDOperand, 8> Ops; 1611 1612 if (IsTailCall) { 1613 Ops.push_back(Chain); 1614 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1615 Ops.push_back(DAG.getIntPtrConstant(0)); 1616 if (InFlag.Val) 1617 Ops.push_back(InFlag); 1618 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1619 InFlag = Chain.getValue(1); 1620 1621 // Returns a chain & a flag for retval copy to use. 1622 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1623 Ops.clear(); 1624 } 1625 1626 Ops.push_back(Chain); 1627 Ops.push_back(Callee); 1628 1629 if (IsTailCall) 1630 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1631 1632 // Add argument registers to the end of the list so that they are known live 1633 // into the call. 1634 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1635 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1636 RegsToPass[i].second.getValueType())); 1637 1638 // Add an implicit use GOT pointer in EBX. 1639 if (!IsTailCall && !Is64Bit && 1640 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1641 Subtarget->isPICStyleGOT()) 1642 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1643 1644 // Add an implicit use of AL for x86 vararg functions. 
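  // (AL was loaded above, via CopyToReg, with the number of XMM registers the
  // outgoing arguments actually use; for instance, a vararg call passing one
  // double in XMM0 ends up with AL == 1.)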
  if (Is64Bit && isVarArg)
    Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  if (IsTailCall) {
    assert(InFlag.Val &&
           "Flag must be set. Depend on flag being set in LowerRET");
    Chain = DAG.getNode(X86ISD::TAILCALL,
                        Op.Val->getVTList(), &Ops[0], Ops.size());

    return SDOperand(Chain.Val, Op.ResNo);
  }

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush;
  if (IsCalleePop(Op))
    NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
  else if (!Is64Bit && IsStructRet)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(NumBytes),
                             DAG.getIntPtrConstant(NumBytesForCalleeToPush),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like the StdCall calling convention, the callee cleans up the stack
//  arguments, except that ECX is reserved for storing the tail called
//  function address. Only 2 registers are free for argument passing (inreg).
//  Tail call optimization is performed provided:
//  * tailcallopt is enabled
//  * caller/callee are fastcc
//  On X86_64 architecture with GOT-style position independent code only local
//  (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - darwin's
//  dyld for example.)
//  If a tail called function callee has more arguments than the caller the
//  caller needs to make sure that there is room to move the RETADDR to. This
//  is achieved by reserving an area the size of the argument delta right
//  after the original RETADDR, but before the saved framepointer or the
//  spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3,
//  arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Round the stack size up so that, together
/// with the return address slot, it is a multiple of the stack alignment
/// (e.g. 16n + 12 for a 16 byte alignment and a 4 byte slot).
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // Number smaller than 12 so just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out lower bits, add stackalignment once plus the 12 bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment-SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A function is eligible if
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On x86/32-bit, PIC/GOT tail calls are supported.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
        return true;

      // Can only do local tail calls (in same module, hidden or protected)
      // on x86_64 PIC/GOT at the moment.
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        return G->getGlobal()->hasHiddenVisibility()
            || G->getGlobal()->hasProtectedVisibility();
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//


SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}



/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
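/// For example (both cases taken from the switches below): integer SETGT
/// maps directly to X86::COND_G, while floating point SETOLT has no direct
/// counterpart and is lowered by swapping LHS/RHS and using X86::COND_A.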
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the specified range
/// [Low, Hi).
1906static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { 1907 if (Op.getOpcode() == ISD::UNDEF) 1908 return true; 1909 1910 unsigned Val = cast<ConstantSDNode>(Op)->getValue(); 1911 return (Val >= Low && Val < Hi); 1912} 1913 1914/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return 1915/// true if Op is undef or if its value equal to the specified value. 1916static bool isUndefOrEqual(SDOperand Op, unsigned Val) { 1917 if (Op.getOpcode() == ISD::UNDEF) 1918 return true; 1919 return cast<ConstantSDNode>(Op)->getValue() == Val; 1920} 1921 1922/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 1923/// specifies a shuffle of elements that is suitable for input to PSHUFD. 1924bool X86::isPSHUFDMask(SDNode *N) { 1925 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1926 1927 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 1928 return false; 1929 1930 // Check if the value doesn't reference the second vector. 1931 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1932 SDOperand Arg = N->getOperand(i); 1933 if (Arg.getOpcode() == ISD::UNDEF) continue; 1934 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1935 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 1936 return false; 1937 } 1938 1939 return true; 1940} 1941 1942/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1943/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1944bool X86::isPSHUFHWMask(SDNode *N) { 1945 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1946 1947 if (N->getNumOperands() != 8) 1948 return false; 1949 1950 // Lower quadword copied in order. 1951 for (unsigned i = 0; i != 4; ++i) { 1952 SDOperand Arg = N->getOperand(i); 1953 if (Arg.getOpcode() == ISD::UNDEF) continue; 1954 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1955 if (cast<ConstantSDNode>(Arg)->getValue() != i) 1956 return false; 1957 } 1958 1959 // Upper quadword shuffled. 1960 for (unsigned i = 4; i != 8; ++i) { 1961 SDOperand Arg = N->getOperand(i); 1962 if (Arg.getOpcode() == ISD::UNDEF) continue; 1963 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1964 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1965 if (Val < 4 || Val > 7) 1966 return false; 1967 } 1968 1969 return true; 1970} 1971 1972/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 1973/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 1974bool X86::isPSHUFLWMask(SDNode *N) { 1975 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1976 1977 if (N->getNumOperands() != 8) 1978 return false; 1979 1980 // Upper quadword copied in order. 1981 for (unsigned i = 4; i != 8; ++i) 1982 if (!isUndefOrEqual(N->getOperand(i), i)) 1983 return false; 1984 1985 // Lower quadword shuffled. 1986 for (unsigned i = 0; i != 4; ++i) 1987 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 1988 return false; 1989 1990 return true; 1991} 1992 1993/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 1994/// specifies a shuffle of elements that is suitable for input to SHUFP*. 
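/// For a 4-element shuffle that means e.g. mask <0, 3, 4, 7> is accepted:
/// the low half may only select from V1 (indices 0..3) and the high half
/// only from V2 (indices 4..7).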
1995static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 1996 if (NumElems != 2 && NumElems != 4) return false; 1997 1998 unsigned Half = NumElems / 2; 1999 for (unsigned i = 0; i < Half; ++i) 2000 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2001 return false; 2002 for (unsigned i = Half; i < NumElems; ++i) 2003 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2004 return false; 2005 2006 return true; 2007} 2008 2009bool X86::isSHUFPMask(SDNode *N) { 2010 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2011 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2012} 2013 2014/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2015/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2016/// half elements to come from vector 1 (which would equal the dest.) and 2017/// the upper half to come from vector 2. 2018static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2019 if (NumOps != 2 && NumOps != 4) return false; 2020 2021 unsigned Half = NumOps / 2; 2022 for (unsigned i = 0; i < Half; ++i) 2023 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2024 return false; 2025 for (unsigned i = Half; i < NumOps; ++i) 2026 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2027 return false; 2028 return true; 2029} 2030 2031static bool isCommutedSHUFP(SDNode *N) { 2032 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2033 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2034} 2035 2036/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2037/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2038bool X86::isMOVHLPSMask(SDNode *N) { 2039 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2040 2041 if (N->getNumOperands() != 4) 2042 return false; 2043 2044 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2045 return isUndefOrEqual(N->getOperand(0), 6) && 2046 isUndefOrEqual(N->getOperand(1), 7) && 2047 isUndefOrEqual(N->getOperand(2), 2) && 2048 isUndefOrEqual(N->getOperand(3), 3); 2049} 2050 2051/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2052/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2053/// <2, 3, 2, 3> 2054bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2055 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2056 2057 if (N->getNumOperands() != 4) 2058 return false; 2059 2060 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2061 return isUndefOrEqual(N->getOperand(0), 2) && 2062 isUndefOrEqual(N->getOperand(1), 3) && 2063 isUndefOrEqual(N->getOperand(2), 2) && 2064 isUndefOrEqual(N->getOperand(3), 3); 2065} 2066 2067/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2068/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2069bool X86::isMOVLPMask(SDNode *N) { 2070 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2071 2072 unsigned NumElems = N->getNumOperands(); 2073 if (NumElems != 2 && NumElems != 4) 2074 return false; 2075 2076 for (unsigned i = 0; i < NumElems/2; ++i) 2077 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2078 return false; 2079 2080 for (unsigned i = NumElems/2; i < NumElems; ++i) 2081 if (!isUndefOrEqual(N->getOperand(i), i)) 2082 return false; 2083 2084 return true; 2085} 2086 2087/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2088/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2089/// and MOVLHPS. 
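/// e.g. the v4f32 mask <0, 1, 4, 5> is accepted: the low half is kept from
/// V1 and the high half is replaced by the low half of V2.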
2090bool X86::isMOVHPMask(SDNode *N) { 2091 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2092 2093 unsigned NumElems = N->getNumOperands(); 2094 if (NumElems != 2 && NumElems != 4) 2095 return false; 2096 2097 for (unsigned i = 0; i < NumElems/2; ++i) 2098 if (!isUndefOrEqual(N->getOperand(i), i)) 2099 return false; 2100 2101 for (unsigned i = 0; i < NumElems/2; ++i) { 2102 SDOperand Arg = N->getOperand(i + NumElems/2); 2103 if (!isUndefOrEqual(Arg, i + NumElems)) 2104 return false; 2105 } 2106 2107 return true; 2108} 2109 2110/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2111/// specifies a shuffle of elements that is suitable for input to UNPCKL. 2112bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2113 bool V2IsSplat = false) { 2114 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2115 return false; 2116 2117 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2118 SDOperand BitI = Elts[i]; 2119 SDOperand BitI1 = Elts[i+1]; 2120 if (!isUndefOrEqual(BitI, j)) 2121 return false; 2122 if (V2IsSplat) { 2123 if (isUndefOrEqual(BitI1, NumElts)) 2124 return false; 2125 } else { 2126 if (!isUndefOrEqual(BitI1, j + NumElts)) 2127 return false; 2128 } 2129 } 2130 2131 return true; 2132} 2133 2134bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2135 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2136 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2137} 2138 2139/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2140/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2141bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2142 bool V2IsSplat = false) { 2143 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2144 return false; 2145 2146 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2147 SDOperand BitI = Elts[i]; 2148 SDOperand BitI1 = Elts[i+1]; 2149 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2150 return false; 2151 if (V2IsSplat) { 2152 if (isUndefOrEqual(BitI1, NumElts)) 2153 return false; 2154 } else { 2155 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2156 return false; 2157 } 2158 } 2159 2160 return true; 2161} 2162 2163bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2164 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2165 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2166} 2167 2168/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2169/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2170/// <0, 0, 1, 1> 2171bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2172 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2173 2174 unsigned NumElems = N->getNumOperands(); 2175 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2176 return false; 2177 2178 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2179 SDOperand BitI = N->getOperand(i); 2180 SDOperand BitI1 = N->getOperand(i+1); 2181 2182 if (!isUndefOrEqual(BitI, j)) 2183 return false; 2184 if (!isUndefOrEqual(BitI1, j)) 2185 return false; 2186 } 2187 2188 return true; 2189} 2190 2191/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2192/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
/// vector_shuffle v, undef, <2, 2, 3, 3>
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i + 1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants. X86 movss requires the lowest element to be the
/// lowest element of vector 2, and the other elements to come from vector 1
/// in order.
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same,
  // and if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities with a single
  // instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element #0.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8-element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
/// values in the permute mask.
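/// e.g. vector_shuffle V1, V2, <0, 5, 2, 7> becomes
/// vector_shuffle V2, V1, <4, 1, 6, 3>.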
2486static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2487 SDOperand &V2, SDOperand &Mask, 2488 SelectionDAG &DAG) { 2489 MVT::ValueType VT = Op.getValueType(); 2490 MVT::ValueType MaskVT = Mask.getValueType(); 2491 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2492 unsigned NumElems = Mask.getNumOperands(); 2493 SmallVector<SDOperand, 8> MaskVec; 2494 2495 for (unsigned i = 0; i != NumElems; ++i) { 2496 SDOperand Arg = Mask.getOperand(i); 2497 if (Arg.getOpcode() == ISD::UNDEF) { 2498 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2499 continue; 2500 } 2501 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2502 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2503 if (Val < NumElems) 2504 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2505 else 2506 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2507 } 2508 2509 std::swap(V1, V2); 2510 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2511 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2512} 2513 2514/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2515/// the two vector operands have swapped position. 2516static 2517SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2518 MVT::ValueType MaskVT = Mask.getValueType(); 2519 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2520 unsigned NumElems = Mask.getNumOperands(); 2521 SmallVector<SDOperand, 8> MaskVec; 2522 for (unsigned i = 0; i != NumElems; ++i) { 2523 SDOperand Arg = Mask.getOperand(i); 2524 if (Arg.getOpcode() == ISD::UNDEF) { 2525 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2526 continue; 2527 } 2528 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2529 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2530 if (Val < NumElems) 2531 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2532 else 2533 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2534 } 2535 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2536} 2537 2538 2539/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2540/// match movhlps. The lower half elements should come from upper half of 2541/// V1 (and in order), and the upper half elements should come from the upper 2542/// half of V2 (and in order). 2543static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2544 unsigned NumElems = Mask->getNumOperands(); 2545 if (NumElems != 4) 2546 return false; 2547 for (unsigned i = 0, e = 2; i != e; ++i) 2548 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2549 return false; 2550 for (unsigned i = 2; i != 4; ++i) 2551 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2552 return false; 2553 return true; 2554} 2555 2556/// isScalarLoadToVector - Returns true if the node is a scalar load that 2557/// is promoted to a vector. 2558static inline bool isScalarLoadToVector(SDNode *N) { 2559 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2560 N = N->getOperand(0).Val; 2561 return ISD::isNON_EXTLoad(N); 2562 } 2563 return false; 2564} 2565 2566/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2567/// match movlp{s|d}. The lower half elements should come from lower half of 2568/// V1 (and in order), and the upper half elements should come from the upper 2569/// half of V2 (and in order). And since V1 will become the source of the 2570/// MOVLP, it must be either a vector load or a scalar load to vector. 
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation; we will try to
  // fold the load into a shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements
/// are all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be
/// resolved to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be
/// resolved to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.Val->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
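/// e.g. getZeroVector(MVT::v4f32, DAG) builds a v4i32 BUILD_VECTOR of zero
/// constants and bitcasts the result back to v4f32.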
2672/// 2673static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2674 assert(MVT::isVector(VT) && "Expected a vector type"); 2675 2676 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2677 // type. This ensures they get CSE'd. 2678 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2679 SDOperand Vec; 2680 if (MVT::getSizeInBits(VT) == 64) // MMX 2681 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2682 else // SSE 2683 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2684 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2685} 2686 2687/// getOnesVector - Returns a vector of specified type with all bits set. 2688/// 2689static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2690 assert(MVT::isVector(VT) && "Expected a vector type"); 2691 2692 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2693 // type. This ensures they get CSE'd. 2694 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2695 SDOperand Vec; 2696 if (MVT::getSizeInBits(VT) == 64) // MMX 2697 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2698 else // SSE 2699 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2700 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2701} 2702 2703 2704/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2705/// that point to V2 points to its first element. 2706static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2707 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2708 2709 bool Changed = false; 2710 SmallVector<SDOperand, 8> MaskVec; 2711 unsigned NumElems = Mask.getNumOperands(); 2712 for (unsigned i = 0; i != NumElems; ++i) { 2713 SDOperand Arg = Mask.getOperand(i); 2714 if (Arg.getOpcode() != ISD::UNDEF) { 2715 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2716 if (Val > NumElems) { 2717 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2718 Changed = true; 2719 } 2720 } 2721 MaskVec.push_back(Arg); 2722 } 2723 2724 if (Changed) 2725 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2726 &MaskVec[0], MaskVec.size()); 2727 return Mask; 2728} 2729 2730/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2731/// operation of specified width. 2732static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2733 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2734 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2735 2736 SmallVector<SDOperand, 8> MaskVec; 2737 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2738 for (unsigned i = 1; i != NumElems; ++i) 2739 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2740 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2741} 2742 2743/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2744/// of specified width. 
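/// e.g. getUnpacklMask(4, DAG) produces the interleaving mask <0, 4, 1, 5>.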
2745static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2746 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2747 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2748 SmallVector<SDOperand, 8> MaskVec; 2749 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2750 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2751 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2752 } 2753 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2754} 2755 2756/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2757/// of specified width. 2758static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2759 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2760 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2761 unsigned Half = NumElems/2; 2762 SmallVector<SDOperand, 8> MaskVec; 2763 for (unsigned i = 0; i != Half; ++i) { 2764 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2765 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2766 } 2767 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2768} 2769 2770/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps 2771/// element #0 of a vector with the specified index, leaving the rest of the 2772/// elements in place. 2773static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, 2774 SelectionDAG &DAG) { 2775 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2776 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2777 SmallVector<SDOperand, 8> MaskVec; 2778 // Element #0 of the result gets the elt we are replacing. 2779 MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); 2780 for (unsigned i = 1; i != NumElems; ++i) 2781 MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); 2782 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2783} 2784 2785/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. 2786static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { 2787 MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32; 2788 MVT::ValueType VT = Op.getValueType(); 2789 if (PVT == VT) 2790 return Op; 2791 SDOperand V1 = Op.getOperand(0); 2792 SDOperand Mask = Op.getOperand(2); 2793 unsigned NumElems = Mask.getNumOperands(); 2794 // Special handling of v4f32 -> v4i32. 2795 if (VT != MVT::v4f32) { 2796 Mask = getUnpacklMask(NumElems, DAG); 2797 while (NumElems > 4) { 2798 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2799 NumElems >>= 1; 2800 } 2801 Mask = getZeroVector(MVT::v4i32, DAG); 2802 } 2803 2804 V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1); 2805 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, 2806 DAG.getNode(ISD::UNDEF, PVT), Mask); 2807 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2808} 2809 2810/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2811/// vector of zero or undef vector. This produces a shuffle where the low 2812/// element of V2 is swizzled into the zero/undef vector, landing at element 2813/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 2814static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, 2815 bool isZero, SelectionDAG &DAG) { 2816 MVT::ValueType VT = V2.getValueType(); 2817 SDOperand V1 = isZero ? 
getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2818 unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); 2819 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2820 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2821 SmallVector<SDOperand, 16> MaskVec; 2822 for (unsigned i = 0; i != NumElems; ++i) 2823 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 2824 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2825 else 2826 MaskVec.push_back(DAG.getConstant(i, EVT)); 2827 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2828 &MaskVec[0], MaskVec.size()); 2829 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2830} 2831 2832/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2833/// 2834static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2835 unsigned NumNonZero, unsigned NumZero, 2836 SelectionDAG &DAG, TargetLowering &TLI) { 2837 if (NumNonZero > 8) 2838 return SDOperand(); 2839 2840 SDOperand V(0, 0); 2841 bool First = true; 2842 for (unsigned i = 0; i < 16; ++i) { 2843 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2844 if (ThisIsNonZero && First) { 2845 if (NumZero) 2846 V = getZeroVector(MVT::v8i16, DAG); 2847 else 2848 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2849 First = false; 2850 } 2851 2852 if ((i & 1) != 0) { 2853 SDOperand ThisElt(0, 0), LastElt(0, 0); 2854 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2855 if (LastIsNonZero) { 2856 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2857 } 2858 if (ThisIsNonZero) { 2859 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2860 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2861 ThisElt, DAG.getConstant(8, MVT::i8)); 2862 if (LastIsNonZero) 2863 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2864 } else 2865 ThisElt = LastElt; 2866 2867 if (ThisElt.Val) 2868 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2869 DAG.getIntPtrConstant(i/2)); 2870 } 2871 } 2872 2873 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2874} 2875 2876/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2877/// 2878static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2879 unsigned NumNonZero, unsigned NumZero, 2880 SelectionDAG &DAG, TargetLowering &TLI) { 2881 if (NumNonZero > 4) 2882 return SDOperand(); 2883 2884 SDOperand V(0, 0); 2885 bool First = true; 2886 for (unsigned i = 0; i < 8; ++i) { 2887 bool isNonZero = (NonZeros & (1 << i)) != 0; 2888 if (isNonZero) { 2889 if (First) { 2890 if (NumZero) 2891 V = getZeroVector(MVT::v8i16, DAG); 2892 else 2893 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2894 First = false; 2895 } 2896 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2897 DAG.getIntPtrConstant(i)); 2898 } 2899 } 2900 2901 return V; 2902} 2903 2904SDOperand 2905X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2906 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 2907 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 2908 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 2909 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 2910 // eliminated on x86-32 hosts. 
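    // e.g. an all-ones v2i64 is rebuilt below as a v4i32 vector of ~0U (see
    // getOnesVector above), which is then matched as pcmpeqd.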
2911 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 2912 return Op; 2913 2914 if (ISD::isBuildVectorAllOnes(Op.Val)) 2915 return getOnesVector(Op.getValueType(), DAG); 2916 return getZeroVector(Op.getValueType(), DAG); 2917 } 2918 2919 MVT::ValueType VT = Op.getValueType(); 2920 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2921 unsigned EVTBits = MVT::getSizeInBits(EVT); 2922 2923 unsigned NumElems = Op.getNumOperands(); 2924 unsigned NumZero = 0; 2925 unsigned NumNonZero = 0; 2926 unsigned NonZeros = 0; 2927 bool IsAllConstants = true; 2928 SmallSet<SDOperand, 8> Values; 2929 for (unsigned i = 0; i < NumElems; ++i) { 2930 SDOperand Elt = Op.getOperand(i); 2931 if (Elt.getOpcode() == ISD::UNDEF) 2932 continue; 2933 Values.insert(Elt); 2934 if (Elt.getOpcode() != ISD::Constant && 2935 Elt.getOpcode() != ISD::ConstantFP) 2936 IsAllConstants = false; 2937 if (isZeroNode(Elt)) 2938 NumZero++; 2939 else { 2940 NonZeros |= (1 << i); 2941 NumNonZero++; 2942 } 2943 } 2944 2945 if (NumNonZero == 0) { 2946 // All undef vector. Return an UNDEF. All zero vectors were handled above. 2947 return DAG.getNode(ISD::UNDEF, VT); 2948 } 2949 2950 // Special case for single non-zero, non-undef, element. 2951 if (NumNonZero == 1 && NumElems <= 4) { 2952 unsigned Idx = CountTrailingZeros_32(NonZeros); 2953 SDOperand Item = Op.getOperand(Idx); 2954 2955 // If this is an insertion of an i64 value on x86-32, and if the top bits of 2956 // the value are obviously zero, truncate the value to i32 and do the 2957 // insertion that way. Only do this if the value is non-constant or if the 2958 // value is a constant being inserted into element 0. It is cheaper to do 2959 // a constant pool load than it is to do a movd + shuffle. 2960 if (EVT == MVT::i64 && !Subtarget->is64Bit() && 2961 (!IsAllConstants || Idx == 0)) { 2962 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 2963 // Handle MMX and SSE both. 2964 MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; 2965 MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2; 2966 2967 // Truncate the value (which may itself be a constant) to i32, and 2968 // convert it to a vector with movd (S2V+shuffle to zero extend). 2969 Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); 2970 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); 2971 Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG); 2972 2973 // Now we have our 32-bit value zero extended in the low element of 2974 // a vector. If Idx != 0, swizzle it into place. 2975 if (Idx != 0) { 2976 SDOperand Ops[] = { 2977 Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), 2978 getSwapEltZeroMask(VecElts, Idx, DAG) 2979 }; 2980 Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); 2981 } 2982 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); 2983 } 2984 } 2985 2986 // If we have a constant or non-constant insertion into the low element of 2987 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 2988 // the rest of the elements. This will be matched as movd/movq/movss/movsd 2989 // depending on what the source datatype is. Because we can only get here 2990 // when NumElems <= 4, this only needs to handle i32/f32/i64/f64. 2991 if (Idx == 0 && 2992 // Don't do this for i64 values on x86-32. 2993 (EVT != MVT::i64 || Subtarget->is64Bit())) { 2994 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2995 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 
2996 return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 2997 } 2998 2999 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 3000 return SDOperand(); 3001 3002 // Otherwise, if this is a vector with i32 or f32 elements, and the element 3003 // is a non-constant being inserted into an element other than the low one, 3004 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 3005 // movd/movss) to move this into the low element, then shuffle it into 3006 // place. 3007 if (EVTBits == 32) { 3008 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3009 3010 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3011 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3012 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3013 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3014 SmallVector<SDOperand, 8> MaskVec; 3015 for (unsigned i = 0; i < NumElems; i++) 3016 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3017 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3018 &MaskVec[0], MaskVec.size()); 3019 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3020 DAG.getNode(ISD::UNDEF, VT), Mask); 3021 } 3022 } 3023 3024 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3025 if (Values.size() == 1) 3026 return SDOperand(); 3027 3028 // A vector full of immediates; various special cases are already 3029 // handled, so this is best done with a single constant-pool load. 3030 if (IsAllConstants) 3031 return SDOperand(); 3032 3033 // Let legalizer expand 2-wide build_vectors. 3034 if (EVTBits == 64) 3035 return SDOperand(); 3036 3037 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3038 if (EVTBits == 8 && NumElems == 16) { 3039 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3040 *this); 3041 if (V.Val) return V; 3042 } 3043 3044 if (EVTBits == 16 && NumElems == 8) { 3045 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3046 *this); 3047 if (V.Val) return V; 3048 } 3049 3050 // If element VT is == 32 bits, turn it into a number of shuffles. 3051 SmallVector<SDOperand, 8> V; 3052 V.resize(NumElems); 3053 if (NumElems == 4 && NumZero > 0) { 3054 for (unsigned i = 0; i < 4; ++i) { 3055 bool isZero = !(NonZeros & (1 << i)); 3056 if (isZero) 3057 V[i] = getZeroVector(VT, DAG); 3058 else 3059 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3060 } 3061 3062 for (unsigned i = 0; i < 2; ++i) { 3063 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3064 default: break; 3065 case 0: 3066 V[i] = V[i*2]; // Must be a zero vector. 3067 break; 3068 case 1: 3069 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3070 getMOVLMask(NumElems, DAG)); 3071 break; 3072 case 2: 3073 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3074 getMOVLMask(NumElems, DAG)); 3075 break; 3076 case 3: 3077 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3078 getUnpacklMask(NumElems, DAG)); 3079 break; 3080 } 3081 } 3082 3083 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3084 // clears the upper bits. 3085 // FIXME: we can do the same for v4f32 case when we know both parts of 3086 // the lower half come from scalar_to_vector (loadf32). We should do 3087 // that in post legalizer dag combiner with target specific hooks. 
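// (Illustrative example, added for clarity.) For an integer build_vector
// <x, y, 0, 0>: movd produces <x,0,0,0> and <y,0,0,0>, and punpckldq
// interleaves them into <x, y, 0, 0>; since the upper two elements are
// already the required zeros, V[0] is the complete result and the check
// below returns it directly.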
3088 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3089 return V[0]; 3090 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3091 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3092 SmallVector<SDOperand, 8> MaskVec; 3093 bool Reverse = (NonZeros & 0x3) == 2; 3094 for (unsigned i = 0; i < 2; ++i) 3095 if (Reverse) 3096 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3097 else 3098 MaskVec.push_back(DAG.getConstant(i, EVT)); 3099 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3100 for (unsigned i = 0; i < 2; ++i) 3101 if (Reverse) 3102 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3103 else 3104 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3105 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3106 &MaskVec[0], MaskVec.size()); 3107 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3108 } 3109 3110 if (Values.size() > 2) { 3111 // Expand into a number of unpckl*. 3112 // e.g. for v4f32 3113 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3114 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3115 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3116 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3117 for (unsigned i = 0; i < NumElems; ++i) 3118 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3119 NumElems >>= 1; 3120 while (NumElems != 0) { 3121 for (unsigned i = 0; i < NumElems; ++i) 3122 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3123 UnpckMask); 3124 NumElems >>= 1; 3125 } 3126 return V[0]; 3127 } 3128 3129 return SDOperand(); 3130} 3131 3132static 3133SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3134 SDOperand PermMask, SelectionDAG &DAG, 3135 TargetLowering &TLI) { 3136 SDOperand NewV; 3137 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3138 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3139 MVT::ValueType PtrVT = TLI.getPointerTy(); 3140 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3141 PermMask.Val->op_end()); 3142 3143 // First record which half of which vector the low elements come from. 3144 SmallVector<unsigned, 4> LowQuad(4); 3145 for (unsigned i = 0; i < 4; ++i) { 3146 SDOperand Elt = MaskElts[i]; 3147 if (Elt.getOpcode() == ISD::UNDEF) 3148 continue; 3149 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3150 int QuadIdx = EltIdx / 4; 3151 ++LowQuad[QuadIdx]; 3152 } 3153 int BestLowQuad = -1; 3154 unsigned MaxQuad = 1; 3155 for (unsigned i = 0; i < 4; ++i) { 3156 if (LowQuad[i] > MaxQuad) { 3157 BestLowQuad = i; 3158 MaxQuad = LowQuad[i]; 3159 } 3160 } 3161 3162 // Record which half of which vector the high elements come from. 3163 SmallVector<unsigned, 4> HighQuad(4); 3164 for (unsigned i = 4; i < 8; ++i) { 3165 SDOperand Elt = MaskElts[i]; 3166 if (Elt.getOpcode() == ISD::UNDEF) 3167 continue; 3168 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3169 int QuadIdx = EltIdx / 4; 3170 ++HighQuad[QuadIdx]; 3171 } 3172 int BestHighQuad = -1; 3173 MaxQuad = 1; 3174 for (unsigned i = 0; i < 4; ++i) { 3175 if (HighQuad[i] > MaxQuad) { 3176 BestHighQuad = i; 3177 MaxQuad = HighQuad[i]; 3178 } 3179 } 3180 3181 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3182 if (BestLowQuad != -1 || BestHighQuad != -1) { 3183 // First sort the 4 chunks in order using shufpd. 
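// (Illustrative example.) With BestLowQuad == 2 and BestHighQuad == 0, the
// v2i64 mask built below is <2, 0>: a single shufpd puts the low half of V2
// into the result's low quadword and the low half of V1 into its high
// quadword (quads 0-1 are V1's halves, quads 2-3 are V2's).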
3184 SmallVector<SDOperand, 8> MaskVec; 3185 if (BestLowQuad != -1) 3186 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3187 else 3188 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3189 if (BestHighQuad != -1) 3190 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3191 else 3192 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3193 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3194 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3195 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3196 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3197 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3198 3199 // Now sort high and low parts separately. 3200 BitVector InOrder(8); 3201 if (BestLowQuad != -1) { 3202 // Sort lower half in order using PSHUFLW. 3203 MaskVec.clear(); 3204 bool AnyOutOrder = false; 3205 for (unsigned i = 0; i != 4; ++i) { 3206 SDOperand Elt = MaskElts[i]; 3207 if (Elt.getOpcode() == ISD::UNDEF) { 3208 MaskVec.push_back(Elt); 3209 InOrder.set(i); 3210 } else { 3211 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3212 if (EltIdx != i) 3213 AnyOutOrder = true; 3214 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3215 // If this element is in the right place after this shuffle, then 3216 // remember it. 3217 if ((int)(EltIdx / 4) == BestLowQuad) 3218 InOrder.set(i); 3219 } 3220 } 3221 if (AnyOutOrder) { 3222 for (unsigned i = 4; i != 8; ++i) 3223 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3224 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3225 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3226 } 3227 } 3228 3229 if (BestHighQuad != -1) { 3230 // Sort high half in order using PSHUFHW if possible. 3231 MaskVec.clear(); 3232 for (unsigned i = 0; i != 4; ++i) 3233 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3234 bool AnyOutOrder = false; 3235 for (unsigned i = 4; i != 8; ++i) { 3236 SDOperand Elt = MaskElts[i]; 3237 if (Elt.getOpcode() == ISD::UNDEF) { 3238 MaskVec.push_back(Elt); 3239 InOrder.set(i); 3240 } else { 3241 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3242 if (EltIdx != i) 3243 AnyOutOrder = true; 3244 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3245 // If this element is in the right place after this shuffle, then 3246 // remember it. 3247 if ((int)(EltIdx / 4) == BestHighQuad) 3248 InOrder.set(i); 3249 } 3250 } 3251 if (AnyOutOrder) { 3252 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3253 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3254 } 3255 } 3256 3257 // The other elements are put in the right place using pextrw and pinsrw. 3258 for (unsigned i = 0; i != 8; ++i) { 3259 if (InOrder[i]) 3260 continue; 3261 SDOperand Elt = MaskElts[i]; 3262 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3263 if (EltIdx == i) 3264 continue; 3265 SDOperand ExtOp = (EltIdx < 8) 3266 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3267 DAG.getConstant(EltIdx, PtrVT)) 3268 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3269 DAG.getConstant(EltIdx - 8, PtrVT)); 3270 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3271 DAG.getConstant(i, PtrVT)); 3272 } 3273 return NewV; 3274 } 3275 3276 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3277 // as few as possible. 3278 // First, let's find out how many elements are already in the right order.
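// (Clarifying note.) V1InOrder/V2InOrder count mask slots that already hold
// element i of V1 (resp. V2); V1FromV1/V2FromV2 count slots that still need
// some other element of the same vector. The vector with more elements
// already in order is kept as the base, and the rest are brought in with
// pextrw/pinsrw pairs.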
3279 unsigned V1InOrder = 0; 3280 unsigned V1FromV1 = 0; 3281 unsigned V2InOrder = 0; 3282 unsigned V2FromV2 = 0; 3283 SmallVector<SDOperand, 8> V1Elts; 3284 SmallVector<SDOperand, 8> V2Elts; 3285 for (unsigned i = 0; i < 8; ++i) { 3286 SDOperand Elt = MaskElts[i]; 3287 if (Elt.getOpcode() == ISD::UNDEF) { 3288 V1Elts.push_back(Elt); 3289 V2Elts.push_back(Elt); 3290 ++V1InOrder; 3291 ++V2InOrder; 3292 continue; 3293 } 3294 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3295 if (EltIdx == i) { 3296 V1Elts.push_back(Elt); 3297 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3298 ++V1InOrder; 3299 } else if (EltIdx == i+8) { 3300 V1Elts.push_back(Elt); 3301 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3302 ++V2InOrder; 3303 } else if (EltIdx < 8) { 3304 V1Elts.push_back(Elt); 3305 ++V1FromV1; 3306 } else { 3307 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3308 ++V2FromV2; 3309 } 3310 } 3311 3312 if (V2InOrder > V1InOrder) { 3313 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3314 std::swap(V1, V2); 3315 std::swap(V1Elts, V2Elts); 3316 std::swap(V1FromV1, V2FromV2); 3317 } 3318 3319 if ((V1FromV1 + V1InOrder) != 8) { 3320 // Some elements are from V2. 3321 if (V1FromV1) { 3322 // If there are elements that are from V1 but out of place, 3323 // then first sort them in place 3324 SmallVector<SDOperand, 8> MaskVec; 3325 for (unsigned i = 0; i < 8; ++i) { 3326 SDOperand Elt = V1Elts[i]; 3327 if (Elt.getOpcode() == ISD::UNDEF) { 3328 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3329 continue; 3330 } 3331 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3332 if (EltIdx >= 8) 3333 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3334 else 3335 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3336 } 3337 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3338 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3339 } 3340 3341 NewV = V1; 3342 for (unsigned i = 0; i < 8; ++i) { 3343 SDOperand Elt = V1Elts[i]; 3344 if (Elt.getOpcode() == ISD::UNDEF) 3345 continue; 3346 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3347 if (EltIdx < 8) 3348 continue; 3349 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3350 DAG.getConstant(EltIdx - 8, PtrVT)); 3351 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3352 DAG.getConstant(i, PtrVT)); 3353 } 3354 return NewV; 3355 } else { 3356 // All elements are from V1. 3357 NewV = V1; 3358 for (unsigned i = 0; i < 8; ++i) { 3359 SDOperand Elt = V1Elts[i]; 3360 if (Elt.getOpcode() == ISD::UNDEF) 3361 continue; 3362 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3363 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3364 DAG.getConstant(EltIdx, PtrVT)); 3365 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3366 DAG.getConstant(i, PtrVT)); 3367 } 3368 return NewV; 3369 } 3370} 3371 3372/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3373/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be 3374/// done when every pair / quad of shuffle mask elements point to elements in 3375/// the right sequence. e.g. 3376/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3377static 3378SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3379 MVT::ValueType VT, 3380 SDOperand PermMask, SelectionDAG &DAG, 3381 TargetLowering &TLI) { 3382 unsigned NumElems = PermMask.getNumOperands(); 3383 unsigned NewWidth = (NumElems == 4) ? 
2 : 4; 3384 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3385 MVT::ValueType NewVT = MaskVT; 3386 switch (VT) { 3387 case MVT::v4f32: NewVT = MVT::v2f64; break; 3388 case MVT::v4i32: NewVT = MVT::v2i64; break; 3389 case MVT::v8i16: NewVT = MVT::v4i32; break; 3390 case MVT::v16i8: NewVT = MVT::v4i32; break; 3391 default: assert(false && "Unexpected!"); 3392 } 3393 3394 if (NewWidth == 2) { 3395 if (MVT::isInteger(VT)) 3396 NewVT = MVT::v2i64; 3397 else 3398 NewVT = MVT::v2f64; 3399 } 3400 unsigned Scale = NumElems / NewWidth; 3401 SmallVector<SDOperand, 8> MaskVec; 3402 for (unsigned i = 0; i < NumElems; i += Scale) { 3403 unsigned StartIdx = ~0U; 3404 for (unsigned j = 0; j < Scale; ++j) { 3405 SDOperand Elt = PermMask.getOperand(i+j); 3406 if (Elt.getOpcode() == ISD::UNDEF) 3407 continue; 3408 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3409 if (StartIdx == ~0U) 3410 StartIdx = EltIdx - (EltIdx % Scale); 3411 if (EltIdx != StartIdx + j) 3412 return SDOperand(); 3413 } 3414 if (StartIdx == ~0U) 3415 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3416 else 3417 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3418 } 3419 3420 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3421 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3422 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3423 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3424 &MaskVec[0], MaskVec.size())); 3425} 3426 3427SDOperand 3428X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3429 SDOperand V1 = Op.getOperand(0); 3430 SDOperand V2 = Op.getOperand(1); 3431 SDOperand PermMask = Op.getOperand(2); 3432 MVT::ValueType VT = Op.getValueType(); 3433 unsigned NumElems = PermMask.getNumOperands(); 3434 bool isMMX = MVT::getSizeInBits(VT) == 64; 3435 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3436 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3437 bool V1IsSplat = false; 3438 bool V2IsSplat = false; 3439 3440 if (isUndefShuffle(Op.Val)) 3441 return DAG.getNode(ISD::UNDEF, VT); 3442 3443 if (isZeroShuffle(Op.Val)) 3444 return getZeroVector(VT, DAG); 3445 3446 if (isIdentityMask(PermMask.Val)) 3447 return V1; 3448 else if (isIdentityMask(PermMask.Val, true)) 3449 return V2; 3450 3451 if (isSplatMask(PermMask.Val)) { 3452 if (isMMX || NumElems < 4) return Op; 3453 // Promote it to a v4{if}32 splat. 3454 return PromoteSplat(Op, DAG, Subtarget->hasSSE2()); 3455 } 3456 3457 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3458 // do it! 3459 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3460 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3461 if (NewOp.Val) 3462 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3463 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3464 // FIXME: Figure out a cleaner way to do this. 3465 // Try to make use of movq to zero out the top part. 
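// (Illustrative example.) A v2i64 shuffle (zero, X, <2, 1>) produces
// <X[0], 0>, which is matched as movq: move the low quadword and zero the
// upper one.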
3466 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3467 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3468 if (NewOp.Val) { 3469 SDOperand NewV1 = NewOp.getOperand(0); 3470 SDOperand NewV2 = NewOp.getOperand(1); 3471 SDOperand NewMask = NewOp.getOperand(2); 3472 if (isCommutedMOVL(NewMask.Val, true, false)) { 3473 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3474 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3475 NewV1, NewV2, getMOVLMask(2, DAG)); 3476 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3477 } 3478 } 3479 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3480 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3481 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3482 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3483 } 3484 } 3485 3486 if (X86::isMOVLMask(PermMask.Val)) 3487 return (V1IsUndef) ? V2 : Op; 3488 3489 if (X86::isMOVSHDUPMask(PermMask.Val) || 3490 X86::isMOVSLDUPMask(PermMask.Val) || 3491 X86::isMOVHLPSMask(PermMask.Val) || 3492 X86::isMOVHPMask(PermMask.Val) || 3493 X86::isMOVLPMask(PermMask.Val)) 3494 return Op; 3495 3496 if (ShouldXformToMOVHLPS(PermMask.Val) || 3497 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3498 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3499 3500 bool Commuted = false; 3501 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3502 // 1,1,1,1 -> v8i16 though. 3503 V1IsSplat = isSplatVector(V1.Val); 3504 V2IsSplat = isSplatVector(V2.Val); 3505 3506 // Canonicalize the splat or undef, if present, to be on the RHS. 3507 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3508 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3509 std::swap(V1IsSplat, V2IsSplat); 3510 std::swap(V1IsUndef, V2IsUndef); 3511 Commuted = true; 3512 } 3513 3514 // FIXME: Figure out a cleaner way to do this. 3515 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3516 if (V2IsUndef) return V1; 3517 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3518 if (V2IsSplat) { 3519 // V2 is a splat, so the mask may be malformed. That is, it may point 3520 // to any V2 element. The instruction selector won't like this. Get 3521 // a corrected mask and commute to form a proper MOVS{S|D}. 3522 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3523 if (NewMask.Val != PermMask.Val) 3524 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3525 } 3526 return Op; 3527 } 3528 3529 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3530 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3531 X86::isUNPCKLMask(PermMask.Val) || 3532 X86::isUNPCKHMask(PermMask.Val)) 3533 return Op; 3534 3535 if (V2IsSplat) { 3536 // Normalize mask so all entries that point to V2 point to its first 3537 // element, then try to match unpck{h|l} again. If it matches, return a 3538 // new vector_shuffle with the corrected mask.
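// (Illustrative example.) If V2 is a splat, <0, 5, 1, 7> is as good as the
// canonical <0, 4, 1, 5>: every V2 lane holds the same value, so the mask is
// accepted as unpckl and rewritten with the canonical form below.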
3539 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3540 if (NewMask.Val != PermMask.Val) { 3541 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3542 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3543 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3544 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3545 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3546 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3547 } 3548 } 3549 } 3550 3551 // Normalize the node to match x86 shuffle ops if needed. 3552 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3553 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3554 3555 if (Commuted) { 3556 // Commute it back and try unpck* again. 3557 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3558 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3559 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3560 X86::isUNPCKLMask(PermMask.Val) || 3561 X86::isUNPCKHMask(PermMask.Val)) 3562 return Op; 3563 } 3564 3565 // Try PSHUF* first, then SHUFP*. 3566 // MMX doesn't have PSHUFD but it does have PSHUFW. While it's theoretically 3567 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3568 if (isMMX && NumElems == 4 && X86::isPSHUFDMask(PermMask.Val)) { 3569 if (V2.getOpcode() != ISD::UNDEF) 3570 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3571 DAG.getNode(ISD::UNDEF, VT), PermMask); 3572 return Op; 3573 } 3574 3575 if (!isMMX) { 3576 if (Subtarget->hasSSE2() && 3577 (X86::isPSHUFDMask(PermMask.Val) || 3578 X86::isPSHUFHWMask(PermMask.Val) || 3579 X86::isPSHUFLWMask(PermMask.Val))) { 3580 MVT::ValueType RVT = VT; 3581 if (VT == MVT::v4f32) { 3582 RVT = MVT::v4i32; 3583 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, 3584 DAG.getNode(ISD::BIT_CONVERT, RVT, V1), 3585 DAG.getNode(ISD::UNDEF, RVT), PermMask); 3586 } else if (V2.getOpcode() != ISD::UNDEF) 3587 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, V1, 3588 DAG.getNode(ISD::UNDEF, RVT), PermMask); 3589 if (RVT != VT) 3590 Op = DAG.getNode(ISD::BIT_CONVERT, VT, Op); 3591 return Op; 3592 } 3593 3594 // Binary or unary shufps. 3595 if (X86::isSHUFPMask(PermMask.Val) || 3596 (V2.getOpcode() == ISD::UNDEF && X86::isPSHUFDMask(PermMask.Val))) 3597 return Op; 3598 } 3599 3600 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 3601 if (VT == MVT::v8i16) { 3602 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3603 if (NewOp.Val) 3604 return NewOp; 3605 } 3606 3607 // Handle all 4 wide cases with a number of shuffles. 3608 if (NumElems == 4 && !isMMX) { 3609 // Don't do this for MMX. 3610 MVT::ValueType MaskVT = PermMask.getValueType(); 3611 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3612 SmallVector<std::pair<int, int>, 8> Locs; 3613 Locs.reserve(NumElems); 3614 SmallVector<SDOperand, 8> Mask1(NumElems, 3615 DAG.getNode(ISD::UNDEF, MaskEVT)); 3616 SmallVector<SDOperand, 8> Mask2(NumElems, 3617 DAG.getNode(ISD::UNDEF, MaskEVT)); 3618 unsigned NumHi = 0; 3619 unsigned NumLo = 0; 3620 // If no more than two elements come from either vector, this can be 3621 // implemented with two shuffles. The first shuffle gathers the elements; 3622 // the second shuffle, which takes the first shuffle as both of its 3623 // vector operands, puts the elements into the right order.
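// (Illustrative example.) For the mask <2, 7, 0, 5>: the first shuffle
// (mask <2, 0, 7, 5>) gathers <V1[2], V1[0], V2[3], V2[1]>; the second
// shuffle of that result with itself (mask <0, 2, 5, 7>) then produces
// <V1[2], V2[3], V1[0], V2[1]>.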
3624 for (unsigned i = 0; i != NumElems; ++i) { 3625 SDOperand Elt = PermMask.getOperand(i); 3626 if (Elt.getOpcode() == ISD::UNDEF) { 3627 Locs[i] = std::make_pair(-1, -1); 3628 } else { 3629 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3630 if (Val < NumElems) { 3631 Locs[i] = std::make_pair(0, NumLo); 3632 Mask1[NumLo] = Elt; 3633 NumLo++; 3634 } else { 3635 Locs[i] = std::make_pair(1, NumHi); 3636 if (2+NumHi < NumElems) 3637 Mask1[2+NumHi] = Elt; 3638 NumHi++; 3639 } 3640 } 3641 } 3642 if (NumLo <= 2 && NumHi <= 2) { 3643 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3644 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3645 &Mask1[0], Mask1.size())); 3646 for (unsigned i = 0; i != NumElems; ++i) { 3647 if (Locs[i].first == -1) 3648 continue; 3649 else { 3650 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3651 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3652 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3653 } 3654 } 3655 3656 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3657 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3658 &Mask2[0], Mask2.size())); 3659 } 3660 3661 // Break it into (shuffle shuffle_hi, shuffle_lo). 3662 Locs.clear(); 3663 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3664 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3665 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3666 unsigned MaskIdx = 0; 3667 unsigned LoIdx = 0; 3668 unsigned HiIdx = NumElems/2; 3669 for (unsigned i = 0; i != NumElems; ++i) { 3670 if (i == NumElems/2) { 3671 MaskPtr = &HiMask; 3672 MaskIdx = 1; 3673 LoIdx = 0; 3674 HiIdx = NumElems/2; 3675 } 3676 SDOperand Elt = PermMask.getOperand(i); 3677 if (Elt.getOpcode() == ISD::UNDEF) { 3678 Locs[i] = std::make_pair(-1, -1); 3679 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3680 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3681 (*MaskPtr)[LoIdx] = Elt; 3682 LoIdx++; 3683 } else { 3684 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3685 (*MaskPtr)[HiIdx] = Elt; 3686 HiIdx++; 3687 } 3688 } 3689 3690 SDOperand LoShuffle = 3691 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3692 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3693 &LoMask[0], LoMask.size())); 3694 SDOperand HiShuffle = 3695 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3696 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3697 &HiMask[0], HiMask.size())); 3698 SmallVector<SDOperand, 8> MaskOps; 3699 for (unsigned i = 0; i != NumElems; ++i) { 3700 if (Locs[i].first == -1) { 3701 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3702 } else { 3703 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3704 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3705 } 3706 } 3707 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3708 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3709 &MaskOps[0], MaskOps.size())); 3710 } 3711 3712 return SDOperand(); 3713} 3714 3715SDOperand 3716X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3717 SelectionDAG &DAG) { 3718 MVT::ValueType VT = Op.getValueType(); 3719 if (MVT::getSizeInBits(VT) == 8) { 3720 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3721 Op.getOperand(0), Op.getOperand(1)); 3722 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3723 DAG.getValueType(VT)); 3724 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3725 } else if (MVT::getSizeInBits(VT) == 16) { 3726 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3727 Op.getOperand(0), Op.getOperand(1)); 3728 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 
DAG.getValueType(VT)); 3730 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3731 } else if (VT == MVT::f32) { 3732 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 3733 // the result back to an FR32 register. It's only worth matching if the 3734 // result has a single use which is a store. 3735 if (!Op.hasOneUse()) 3736 return SDOperand(); 3737 SDNode *User = Op.Val->use_begin()->getUser(); 3738 if (User->getOpcode() != ISD::STORE) 3739 return SDOperand(); 3740 SDOperand Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3741 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Op.getOperand(0)), 3742 Op.getOperand(1)); 3743 return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Extract); 3744 } 3745 return SDOperand(); 3746} 3747 3748 3749SDOperand 3750X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3751 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3752 return SDOperand(); 3753 3754 if (Subtarget->hasSSE41()) { 3755 SDOperand Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 3756 if (Res.Val) 3757 return Res; 3758 } 3759 3760 MVT::ValueType VT = Op.getValueType(); 3761 // TODO: handle v16i8. 3762 if (MVT::getSizeInBits(VT) == 16) { 3763 SDOperand Vec = Op.getOperand(0); 3764 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3765 if (Idx == 0) 3766 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3767 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3768 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3769 Op.getOperand(1))); 3770 // Transform it so it matches pextrw which produces a 32-bit result. 3771 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3772 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3773 Op.getOperand(0), Op.getOperand(1)); 3774 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3775 DAG.getValueType(VT)); 3776 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3777 } else if (MVT::getSizeInBits(VT) == 32) { 3778 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3779 if (Idx == 0) 3780 return Op; 3781 // SHUFPS the element to the lowest double word, then movss. 3782 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3783 SmallVector<SDOperand, 8> IdxVec; 3784 IdxVec. 3785 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3786 IdxVec. 3787 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3788 IdxVec. 3789 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3790 IdxVec. 3791 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3792 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3793 &IdxVec[0], IdxVec.size()); 3794 SDOperand Vec = Op.getOperand(0); 3795 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3796 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3797 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3798 DAG.getIntPtrConstant(0)); 3799 } else if (MVT::getSizeInBits(VT) == 64) { 3800 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 3801 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 3802 // to match extract_elt for f64. 3803 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3804 if (Idx == 0) 3805 return Op; 3806 3807 // UNPCKHPD the element to the lowest double word, then movsd. 3808 // Note if the lower 64 bits of the result of the UNPCKHPD are then stored 3809 // to an f64mem, the whole operation is folded into a single MOVHPDmr.
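// (Illustrative note.) The shuffle built below uses the two-element mask
// <1, undef>: unpckhpd moves element 1 into lane 0, so the final extract of
// lane 0 costs nothing.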
3810 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); 3811 SmallVector<SDOperand, 8> IdxVec; 3812 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3813 IdxVec. 3814 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3815 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3816 &IdxVec[0], IdxVec.size()); 3817 SDOperand Vec = Op.getOperand(0); 3818 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3819 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3820 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3821 DAG.getIntPtrConstant(0)); 3822 } 3823 3824 return SDOperand(); 3825} 3826 3827SDOperand 3828X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ 3829 MVT::ValueType VT = Op.getValueType(); 3830 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3831 3832 SDOperand N0 = Op.getOperand(0); 3833 SDOperand N1 = Op.getOperand(1); 3834 SDOperand N2 = Op.getOperand(2); 3835 3836 if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) { 3837 unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB 3838 : X86ISD::PINSRW; 3839 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second 3840 // argument. 3841 if (N1.getValueType() != MVT::i32) 3842 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3843 if (N2.getValueType() != MVT::i32) 3844 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3845 return DAG.getNode(Opc, VT, N0, N1, N2); 3846 } else if (EVT == MVT::f32) { 3847 // Bits [7:6] of the constant are the source select. This will always be 3848 // zero here. The DAG Combiner may combine an extract_elt index into these 3849 // bits. For example (insert (extract, 3), 2) could be matched by putting 3850 // the '3' into bits [7:6] of X86ISD::INSERTPS. 3851 // Bits [5:4] of the constant are the destination select. This is the 3852 // value of the incoming immediate. 3853 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 3854 // combine either bitwise AND or insert of float 0.0 to set these bits. 3855 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4); 3856 return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2); 3857 } 3858 return SDOperand(); 3859} 3860 3861SDOperand 3862X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3863 MVT::ValueType VT = Op.getValueType(); 3864 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3865 3866 if (Subtarget->hasSSE41()) 3867 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 3868 3869 if (EVT == MVT::i8) 3870 return SDOperand(); 3871 3872 SDOperand N0 = Op.getOperand(0); 3873 SDOperand N1 = Op.getOperand(1); 3874 SDOperand N2 = Op.getOperand(2); 3875 3876 if (MVT::getSizeInBits(EVT) == 16) { 3877 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32 3878 // as its second argument.
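// (ISA note, added for clarity.) pinsrw reads only the low 16 bits of its
// GR32 source, so an any-extend of the i16 value is sufficient; no zero or
// sign extension is required.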
3879 if (N1.getValueType() != MVT::i32) 3880 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3881 if (N2.getValueType() != MVT::i32) 3882 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3883 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3884 } 3885 return SDOperand(); 3886} 3887 3888SDOperand 3889X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3890 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3891 MVT::ValueType VT = MVT::v2i32; 3892 switch (Op.getValueType()) { 3893 default: break; 3894 case MVT::v16i8: 3895 case MVT::v8i16: 3896 VT = MVT::v4i32; 3897 break; 3898 } 3899 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), 3900 DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt)); 3901} 3902 3903// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3904// their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is 3905// one of the above-mentioned nodes. It has to be wrapped because otherwise 3906// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3907// be used to form addressing modes. These wrapped nodes will be selected 3908// into MOV32ri. 3909SDOperand 3910X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3911 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3912 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3913 getPointerTy(), 3914 CP->getAlignment()); 3915 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3916 // With PIC, the address is actually $g + Offset. 3917 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3918 !Subtarget->isPICStyleRIPRel()) { 3919 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3920 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3921 Result); 3922 } 3923 3924 return Result; 3925} 3926 3927SDOperand 3928X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3929 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3930 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3931 // If it's a debug information descriptor, don't mess with it. 3932 if (DAG.isVerifiedDebugInfoDesc(Op)) 3933 return Result; 3934 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3935 // With PIC, the address is actually $g + Offset. 3936 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3937 !Subtarget->isPICStyleRIPRel()) { 3938 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3939 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3940 Result); 3941 } 3942 3943 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3944 // load the value at address GV, not the value of GV itself. This means that 3945 // the GlobalAddress must be in the base or index register of the address, not 3946 // the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 3947 // The same applies for external symbols during PIC codegen 3948 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3949 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 3950 PseudoSourceValue::getGOT(), 0); 3951 3952 return Result; 3953} 3954 3955// Lower ISD::GlobalTLSAddress using the "general dynamic" model 3956static SDOperand 3957LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3958 const MVT::ValueType PtrVT) { 3959 SDOperand InFlag; 3960 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 3961 DAG.getNode(X86ISD::GlobalBaseReg, 3962 PtrVT), InFlag); 3963 InFlag = Chain.getValue(1); 3964 3965 // emit leal symbol@TLSGD(,%ebx,1), %eax 3966 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3967 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3968 GA->getValueType(0), 3969 GA->getOffset()); 3970 SDOperand Ops[] = { Chain, TGA, InFlag }; 3971 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3972 InFlag = Result.getValue(2); 3973 Chain = Result.getValue(1); 3974 3975 // call ___tls_get_addr. This function receives its argument in 3976 // the register EAX. 3977 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3978 InFlag = Chain.getValue(1); 3979 3980 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3981 SDOperand Ops1[] = { Chain, 3982 DAG.getTargetExternalSymbol("___tls_get_addr", 3983 PtrVT), 3984 DAG.getRegister(X86::EAX, PtrVT), 3985 DAG.getRegister(X86::EBX, PtrVT), 3986 InFlag }; 3987 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3988 InFlag = Chain.getValue(1); 3989 3990 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3991} 3992 3993// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 3994// "local exec" model. 3995static SDOperand 3996LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3997 const MVT::ValueType PtrVT) { 3998 // Get the Thread Pointer 3999 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4000 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4001 // exec) 4002 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4003 GA->getValueType(0), 4004 GA->getOffset()); 4005 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4006 4007 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4008 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 4009 PseudoSourceValue::getGOT(), 0); 4010 4011 // The address of the thread local variable is the add of the thread 4012 // pointer with the offset of the variable. 
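// (Illustrative sketch; exact spelling depends on the assembler.) On x86 ELF
// the thread pointer lives at %gs:0, so local exec is roughly
// movl %gs:0, %eax ; addl $x@ntpoff, %eax, while initial exec first loads
// the variable's offset from the GOT via x@indntpoff.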
4013 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4014} 4015 4016SDOperand 4017X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4018 // TODO: implement the "local dynamic" model 4019 // TODO: implement the "initial exec" model for PIC executables 4020 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 4021 "TLS not implemented for non-ELF and 64-bit targets"); 4022 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4023 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 4024 // otherwise use the "Local Exec" TLS Model 4025 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4026 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 4027 else 4028 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4029} 4030 4031SDOperand 4032X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4033 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4034 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4035 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4036 // With PIC, the address is actually $g + Offset. 4037 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4038 !Subtarget->isPICStyleRIPRel()) { 4039 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4040 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4041 Result); 4042 } 4043 4044 return Result; 4045} 4046 4047SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4048 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4049 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4050 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4051 // With PIC, the address is actually $g + Offset. 4052 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4053 !Subtarget->isPICStyleRIPRel()) { 4054 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4055 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4056 Result); 4057 } 4058 4059 return Result; 4060} 4061 4062/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4063/// take a 2 x i32 value to shift plus a shift amount. 4064SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4065 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4066 MVT::ValueType VT = Op.getValueType(); 4067 unsigned VTBits = MVT::getSizeInBits(VT); 4068 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4069 SDOperand ShOpLo = Op.getOperand(0); 4070 SDOperand ShOpHi = Op.getOperand(1); 4071 SDOperand ShAmt = Op.getOperand(2); 4072 SDOperand Tmp1 = isSRA ? 4073 DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : 4074 DAG.getConstant(0, VT); 4075 4076 SDOperand Tmp2, Tmp3; 4077 if (Op.getOpcode() == ISD::SHL_PARTS) { 4078 Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); 4079 Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); 4080 } else { 4081 Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt); 4082 Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4083 } 4084 4085 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4086 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4087 DAG.getConstant(VTBits, MVT::i8)); 4088 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4089 AndNode, DAG.getConstant(0, MVT::i8)); 4090 4091 SDOperand Hi, Lo; 4092 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4093 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4094 SmallVector<SDOperand, 4> Ops; 4095 if (Op.getOpcode() == ISD::SHL_PARTS) { 4096 Ops.push_back(Tmp2); 4097 Ops.push_back(Tmp3); 4098 Ops.push_back(CC); 4099 Ops.push_back(Cond); 4100 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4101 4102 Ops.clear(); 4103 Ops.push_back(Tmp3); 4104 Ops.push_back(Tmp1); 4105 Ops.push_back(CC); 4106 Ops.push_back(Cond); 4107 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4108 } else { 4109 Ops.push_back(Tmp2); 4110 Ops.push_back(Tmp3); 4111 Ops.push_back(CC); 4112 Ops.push_back(Cond); 4113 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4114 4115 Ops.clear(); 4116 Ops.push_back(Tmp3); 4117 Ops.push_back(Tmp1); 4118 Ops.push_back(CC); 4119 Ops.push_back(Cond); 4120 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4121 } 4122 4123 VTs = DAG.getNodeValueTypes(VT, VT); 4124 Ops.clear(); 4125 Ops.push_back(Lo); 4126 Ops.push_back(Hi); 4127 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4128} 4129 4130SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4131 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4132 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4133 "Unknown SINT_TO_FP to lower!"); 4134 4135 // These are really Legal; caller falls through into that case. 4136 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4137 return SDOperand(); 4138 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4139 Subtarget->is64Bit()) 4140 return SDOperand(); 4141 4142 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4143 MachineFunction &MF = DAG.getMachineFunction(); 4144 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4145 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4146 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4147 StackSlot, 4148 PseudoSourceValue::getFixedStack(), 4149 SSFI); 4150 4151 // Build the FILD 4152 SDVTList Tys; 4153 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4154 if (useSSE) 4155 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4156 else 4157 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4158 SmallVector<SDOperand, 8> Ops; 4159 Ops.push_back(Chain); 4160 Ops.push_back(StackSlot); 4161 Ops.push_back(DAG.getValueType(SrcVT)); 4162 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4163 Tys, &Ops[0], Ops.size()); 4164 4165 if (useSSE) { 4166 Chain = Result.getValue(1); 4167 SDOperand InFlag = Result.getValue(2); 4168 4169 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4170 // shouldn't be necessary except that RFP cannot be live across 4171 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
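// (Illustrative sketch of the SSE path.) For i64 -> f64 with SSE2 the net
// sequence is roughly: store the i64, fildll (the x87 unit performs the
// conversion), fstpl to a second stack slot, then movsd from that slot into
// an XMM register.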
4172 MachineFunction &MF = DAG.getMachineFunction(); 4173 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4174 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4175 Tys = DAG.getVTList(MVT::Other); 4176 SmallVector<SDOperand, 8> Ops; 4177 Ops.push_back(Chain); 4178 Ops.push_back(Result); 4179 Ops.push_back(StackSlot); 4180 Ops.push_back(DAG.getValueType(Op.getValueType())); 4181 Ops.push_back(InFlag); 4182 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4183 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4184 PseudoSourceValue::getFixedStack(), SSFI); 4185 } 4186 4187 return Result; 4188} 4189 4190std::pair<SDOperand,SDOperand> X86TargetLowering:: 4191FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4192 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4193 "Unknown FP_TO_SINT to lower!"); 4194 4195 // These are really Legal. 4196 if (Op.getValueType() == MVT::i32 && 4197 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4198 return std::make_pair(SDOperand(), SDOperand()); 4199 if (Subtarget->is64Bit() && 4200 Op.getValueType() == MVT::i64 && 4201 Op.getOperand(0).getValueType() != MVT::f80) 4202 return std::make_pair(SDOperand(), SDOperand()); 4203 4204 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4205 // stack slot. 4206 MachineFunction &MF = DAG.getMachineFunction(); 4207 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4208 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4209 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4210 unsigned Opc; 4211 switch (Op.getValueType()) { 4212 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4213 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4214 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4215 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4216 } 4217 4218 SDOperand Chain = DAG.getEntryNode(); 4219 SDOperand Value = Op.getOperand(0); 4220 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4221 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4222 Chain = DAG.getStore(Chain, Value, StackSlot, 4223 PseudoSourceValue::getFixedStack(), SSFI); 4224 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4225 SDOperand Ops[] = { 4226 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4227 }; 4228 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4229 Chain = Value.getValue(1); 4230 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4231 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4232 } 4233 4234 // Build the FP_TO_INT*_IN_MEM 4235 SDOperand Ops[] = { Chain, Value, StackSlot }; 4236 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4237 4238 return std::make_pair(FIST, StackSlot); 4239} 4240 4241SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4242 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4243 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4244 if (FIST.Val == 0) return SDOperand(); 4245 4246 // Load the result. 4247 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4248} 4249 4250SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4251 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4252 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4253 if (FIST.Val == 0) return 0; 4254 4255 // Return an i64 load from the stack slot. 
4256 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4257 4258 // Use a MERGE_VALUES node to drop the chain result value. 4259 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4260} 4261 4262SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4263 MVT::ValueType VT = Op.getValueType(); 4264 MVT::ValueType EltVT = VT; 4265 if (MVT::isVector(VT)) 4266 EltVT = MVT::getVectorElementType(VT); 4267 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4268 std::vector<Constant*> CV; 4269 if (EltVT == MVT::f64) { 4270 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4271 CV.push_back(C); 4272 CV.push_back(C); 4273 } else { 4274 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4275 CV.push_back(C); 4276 CV.push_back(C); 4277 CV.push_back(C); 4278 CV.push_back(C); 4279 } 4280 Constant *C = ConstantVector::get(CV); 4281 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4282 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4283 PseudoSourceValue::getConstantPool(), 0, 4284 false, 16); 4285 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4286} 4287 4288SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4289 MVT::ValueType VT = Op.getValueType(); 4290 MVT::ValueType EltVT = VT; 4291 unsigned EltNum = 1; 4292 if (MVT::isVector(VT)) { 4293 EltVT = MVT::getVectorElementType(VT); 4294 EltNum = MVT::getVectorNumElements(VT); 4295 } 4296 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4297 std::vector<Constant*> CV; 4298 if (EltVT == MVT::f64) { 4299 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4300 CV.push_back(C); 4301 CV.push_back(C); 4302 } else { 4303 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4304 CV.push_back(C); 4305 CV.push_back(C); 4306 CV.push_back(C); 4307 CV.push_back(C); 4308 } 4309 Constant *C = ConstantVector::get(CV); 4310 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4311 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4312 PseudoSourceValue::getConstantPool(), 0, 4313 false, 16); 4314 if (MVT::isVector(VT)) { 4315 return DAG.getNode(ISD::BIT_CONVERT, VT, 4316 DAG.getNode(ISD::XOR, MVT::v2i64, 4317 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4318 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4319 } else { 4320 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4321 } 4322} 4323 4324SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4325 SDOperand Op0 = Op.getOperand(0); 4326 SDOperand Op1 = Op.getOperand(1); 4327 MVT::ValueType VT = Op.getValueType(); 4328 MVT::ValueType SrcVT = Op1.getValueType(); 4329 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4330 4331 // If second operand is smaller, extend it first. 4332 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4333 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4334 SrcVT = VT; 4335 SrcTy = MVT::getTypeForValueType(SrcVT); 4336 } 4337 // And if it is bigger, shrink it first. 4338 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4339 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4340 SrcVT = VT; 4341 SrcTy = MVT::getTypeForValueType(SrcVT); 4342 } 4343 4344 // At this point the operands and the result should have the same 4345 // type, and that won't be f80 since that is not custom lowered. 4346 4347 // First get the sign bit of second operand. 
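// (Clarifying note.) The constant-pool vector built below is
// <1ULL << 63, 0> for f64 (resp. <1U << 31, 0, 0, 0> for f32); ANDing Op1
// with it leaves only the sign bit in the low element.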
4348 std::vector<Constant*> CV; 4349 if (SrcVT == MVT::f64) { 4350 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4351 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4352 } else { 4353 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4354 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4355 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4356 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4357 } 4358 Constant *C = ConstantVector::get(CV); 4359 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4360 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4361 PseudoSourceValue::getConstantPool(), 0, 4362 false, 16); 4363 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4364 4365 // Shift sign bit right or left if the two operands have different types. 4366 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4367 // Op0 is MVT::f32, Op1 is MVT::f64. 4368 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4369 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4370 DAG.getConstant(32, MVT::i32)); 4371 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4372 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4373 DAG.getIntPtrConstant(0)); 4374 } 4375 4376 // Clear first operand sign bit. 4377 CV.clear(); 4378 if (VT == MVT::f64) { 4379 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4380 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4381 } else { 4382 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4383 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4384 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4385 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4386 } 4387 C = ConstantVector::get(CV); 4388 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4389 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4390 PseudoSourceValue::getConstantPool(), 0, 4391 false, 16); 4392 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4393 4394 // Or the value with the sign bit. 
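// (Worked equation, for clarity.) copysign(Op0, Op1) ==
// (Op0 & ~SIGN_MASK) | (Op1 & SIGN_MASK), computed here as
// FOR(FAND(Op0, Mask2), SignBit).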
4395 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4396} 4397 4398SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4399 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4400 SDOperand Cond; 4401 SDOperand Op0 = Op.getOperand(0); 4402 SDOperand Op1 = Op.getOperand(1); 4403 SDOperand CC = Op.getOperand(2); 4404 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4405 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4406 unsigned X86CC; 4407 4408 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4409 Op0, Op1, DAG)) { 4410 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4411 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4412 DAG.getConstant(X86CC, MVT::i8), Cond); 4413 } 4414 4415 assert(isFP && "Illegal integer SetCC!"); 4416 4417 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4418 switch (SetCCOpcode) { 4419 default: assert(false && "Illegal floating point SetCC!"); 4420 case ISD::SETOEQ: { // !PF & ZF 4421 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4422 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4423 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4424 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4425 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4426 } 4427 case ISD::SETUNE: { // PF | !ZF 4428 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4429 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4430 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4431 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4432 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4433 } 4434 } 4435} 4436 4437 4438SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4439 bool addTest = true; 4440 SDOperand Cond = Op.getOperand(0); 4441 SDOperand CC; 4442 4443 if (Cond.getOpcode() == ISD::SETCC) 4444 Cond = LowerSETCC(Cond, DAG); 4445 4446 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4447 // setting operand in place of the X86ISD::SETCC. 4448 if (Cond.getOpcode() == X86ISD::SETCC) { 4449 CC = Cond.getOperand(0); 4450 4451 SDOperand Cmp = Cond.getOperand(1); 4452 unsigned Opc = Cmp.getOpcode(); 4453 MVT::ValueType VT = Op.getValueType(); 4454 4455 bool IllegalFPCMov = false; 4456 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4457 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4458 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4459 4460 if ((Opc == X86ISD::CMP || 4461 Opc == X86ISD::COMI || 4462 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4463 Cond = Cmp; 4464 addTest = false; 4465 } 4466 } 4467 4468 if (addTest) { 4469 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4470 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4471 } 4472 4473 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4474 MVT::Flag); 4475 SmallVector<SDOperand, 4> Ops; 4476 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4477 // condition is true. 
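// (Clarifying note.) For ISD::SELECT(Cond, TrueV, FalseV) we therefore push
// FalseV first and TrueV second, yielding CMOV(FalseV, TrueV, CC, Cond):
// the result starts as FalseV and is replaced by TrueV when CC holds, which
// matches cmov's read-modify semantics.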
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond  = Op.getOperand(1);
  SDOperand Dest  = Op.getOperand(2);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP ||
        Opc == X86ISD::COMI ||
        Opc == X86ISD::UCOMI) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDOperand
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
                                           SelectionDAG &DAG) {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");

  // Get the inputs.
  SDOperand Chain = Op.getOperand(0);
  SDOperand Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDOperand Flag;

  MVT::ValueType IntPtr = getPointerTy();
  MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;

  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain,
                      DAG.getTargetExternalSymbol("_alloca", IntPtr),
                      DAG.getRegister(X86::EAX, IntPtr),
                      Flag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(SPTy);
  Tys.push_back(MVT::Other);
  SDOperand Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}

SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned or size is more than the threshold, call memset.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
  if ((Align & 3) != 0 ||
      (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {

    // Check to see if there is a specialized entry-point for memory zeroing.
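    // Informally: when the stored value is a constant zero, some targets
    // (e.g. Darwin) expose a dedicated zeroing routine such as __bzero;
    // getBZeroEntry() returns its name, or null when plain memset must be
    // used.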
    ConstantSDNode *V = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    const char *bzeroEntry =
      V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0;

    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = Op.getOperand(1);
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);

    if (!bzeroEntry) {
      // Extend the unsigned i8 argument to be an int value for the call.
      Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
      Entry.Ty = IntPtrTy;
      Args.push_back(Entry);
    }

    Entry.Node = Op.getOperand(3);
    Args.push_back(Entry);
    const char *Name = bzeroEntry ? bzeroEntry : "memset";
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
                  false, DAG.getExternalSymbol(Name, IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = Op.getOperand(3);
      break;
    }

    if (AVT > MVT::i8) {
      if (I) {
        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
        Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
        BytesLeft = I->getValue() % UBytes;
      } else {
        assert(AVT >= MVT::i32 &&
               "Do not use rep;stos if not at least DWORD aligned");
        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                            Op.getOperand(3), DAG.getConstant(2, MVT::i8));
        TwoRepStos = true;
      }
    }

    Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant((AVT == MVT::i64) ?
                                                 7 : 3, CVT));
    Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
                             Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
  } else if (BytesLeft) {
    // Issue stores for the last 1 - 7 bytes.
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 4) {
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      Value = DAG.getConstant(Val, MVT::i32);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
                                               SDOperand Dest,
                                               SDOperand Source,
                                               unsigned Size,
                                               unsigned Align,
                                               SelectionDAG &DAG) {
  MVT::ValueType AVT;
  unsigned BytesLeft = 0;
  switch (Align & 3) {
  case 2:   // WORD aligned
    AVT = MVT::i16;
    break;
  case 0:   // DWORD aligned
    AVT = MVT::i32;
    if (Subtarget->is64Bit() && ((Align & 0x7) == 0))  // QWORD aligned
      AVT = MVT::i64;
    break;
  default:  // Byte aligned
    AVT = MVT::i8;
    break;
  }

  unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
  SDOperand Count = DAG.getIntPtrConstant(Size / UBytes);
  BytesLeft = Size % UBytes;

  SDOperand InFlag(0, 0);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Dest, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
                           Source, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());

  if (BytesLeft) {
    // Issue loads and stores for the last 1 - 7 bytes.
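    // Worked example (sketch): a 19-byte DWORD-aligned copy uses AVT = i32,
    // so rep;movs moves Count = 19 / 4 = 4 dwords and BytesLeft = 19 % 4 = 3;
    // the code below then copies an i16 at offset 16 and an i8 at offset 18.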
    unsigned Offset = Size - BytesLeft;
    SDOperand DstAddr = Dest;
    MVT::ValueType DstVT = DstAddr.getValueType();
    SDOperand SrcAddr = Source;
    MVT::ValueType SrcVT = SrcAddr.getValueType();
    SDOperand Value;
    if (BytesLeft >= 4) {
      Value = DAG.getLoad(MVT::i32, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getLoad(MVT::i16, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }

    if (BytesLeft == 1) {
      Value = DAG.getLoad(MVT::i8, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand TheChain = N->getOperand(0);
  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
  if (Subtarget->is64Bit()) {
    SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
    SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
                                       MVT::i64, rax.getValue(2));
    SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
                                DAG.getConstant(32, MVT::i8));
    SDOperand Ops[] = {
      DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
    };

    Tys = DAG.getVTList(MVT::i64, MVT::Other);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
  }

  SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
  SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
                                     MVT::i32, eax.getValue(2));
  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDOperand Ops[] = { eax, edx };
  Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);

  // Use a MERGE_VALUES to return the value and chain.
  Ops[1] = edx.getValue(1);
  Tys = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
}

SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
  SmallVector<SDOperand, 8> MemOps;
  SDOperand FIN = Op.getOperand(1);
  // Store gp_offset
  SDOperand Store = DAG.getStore(Op.getOperand(0),
                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
                                 FIN, SV, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0),
                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
                       FIN, SV, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0);
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}

SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
  SDOperand Chain = Op.getOperand(0);
  SDOperand DstPtr = Op.getOperand(1);
  SDOperand SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0);
  Chain = SrcPtr.getValue(1);
  for (unsigned i = 0; i < 3; ++i) {
    SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0);
    Chain = Val.getValue(1);
    Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0);
    if (i == 2)
      break;
    SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
                         DAG.getIntPtrConstant(8));
    DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
                         DAG.getIntPtrConstant(8));
  }
  return Chain;
}

SDOperand
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
  switch (IntNo) {
  default: return SDOperand();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
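  // These lower the scalar SSE/SSE2 compare intrinsics to a COMI/UCOMI node
  // plus a flag-reading SETCC. Informally: comiss/comisd signal an invalid
  // exception for any NaN operand (including QNaN), while ucomiss/ucomisd
  // signal only for SNaN; both set ZF/PF/CF from the comparison.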
  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomineq_sd: {
    unsigned Opc = 0;
    ISD::CondCode CC = ISD::SETCC_INVALID;
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    unsigned X86CC;
    SDOperand LHS = Op.getOperand(1);
    SDOperand RHS = Op.getOperand(2);
    translateX86CC(CC, true, X86CC, LHS, RHS, DAG);

    SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                  DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
  }
  }
}

SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
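  // (Informal note: for depth 0 the return address lives in a fixed slot
  // relative to this function's frame; getReturnAddressFrameIndex below
  // materializes a frame index for that slot, which the load then reads.)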
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  // Just load the return address
  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
                     DAG.getIntPtrConstant(4));
}

SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op,
                                                       SelectionDAG &DAG) {
  // Is not yet supported on x86-64
  if (Subtarget->is64Bit())
    return SDOperand();

  return DAG.getIntPtrConstant(8);
}

SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG)
{
  assert(!Subtarget->is64Bit() &&
         "Lowering of eh_return builtin is not supported yet on x86-64");

  MachineFunction &MF = DAG.getMachineFunction();
  SDOperand Chain   = Op.getOperand(0);
  SDOperand Offset  = Op.getOperand(1);
  SDOperand Handler = Op.getOperand(2);

  SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF),
                                    getPointerTy());

  SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
                                    DAG.getIntPtrConstant(-4UL));
  StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
  Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
  Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr);
  MF.getRegInfo().addLiveOut(X86::ECX);

  return DAG.getNode(X86ISD::EH_RETURN, MVT::Other,
                     Chain, DAG.getRegister(X86::ECX, getPointerTy()));
}

SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op,
                                             SelectionDAG &DAG) {
  SDOperand Root = Op.getOperand(0);
  SDOperand Trmp = Op.getOperand(1); // trampoline
  SDOperand FPtr = Op.getOperand(2); // nested function
  SDOperand Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  const X86InstrInfo *TII =
    ((X86TargetMachine&)getTargetMachine()).getInstrInfo();

  if (Subtarget->is64Bit()) {
    SDOperand OutChains[6];

    // Large code-model.

    const unsigned char JMP64r  = TII->getBaseOpcodeFor(X86::JMP64r);
    const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);

    const unsigned char N86R10 =
      ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10);
    const unsigned char N86R11 =
      ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11);

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDOperand Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
                                TrmpAddr, 0);

    Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
                                TrmpAddr, 10);

    Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
                                TrmpAddr, 20);

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr,
                                TrmpAddr, 22);

    SDOperand Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) };
    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    unsigned CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      const FunctionType *FTy = Func->getFunctionType();
      const PAListPtr &Attrs = Func->getParamAttrs();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.paramHasAttr(Idx, ParamAttr::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          cerr << "Nest register in use - reduce number of inreg parameters!\n";
          abort();
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDOperand OutChains[4];
    SDOperand Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);

    const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
    const unsigned char N86Reg =
      ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, TrmpAddr, 0);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);

    const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                                TrmpAddr, 5, false, 1);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);

    SDOperand Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
  }
}

SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
  /*
   The rounding mode is in bits 11:10 of the FP control word (FPCW, saved
   below with FNSTCW), and has the following settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameInfo &TFI = *TM.getFrameInfo();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT::ValueType VT = Op.getValueType();

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
                                DAG.getEntryNode(), StackSlot);

  // Load FP Control Word from stack slot
  SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);

  // Transform as necessary
  SDOperand CWD1 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDOperand CWD2 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDOperand RetVal =
    DAG.getNode(ISD::AND, MVT::i16,
                DAG.getNode(ISD::ADD, MVT::i16,
                            DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));


  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}

SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType OpVT = VT;
  unsigned NumBits = MVT::getSizeInBits(VT);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), CMOV in 2*NumBits-1 so that the final
  // XOR below yields NumBits.
  SmallVector<SDOperand, 4> Ops;
  Ops.push_back(Op);
  Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
  Ops.push_back(Op.getValue(1));
  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
  return Op;
}

SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType OpVT = VT;
  unsigned NumBits = MVT::getSizeInBits(VT);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
  }

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SmallVector<SDOperand, 4> Ops;
  Ops.push_back(Op);
  Ops.push_back(DAG.getConstant(NumBits, OpVT));
  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
  Ops.push_back(Op.getValue(1));
  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
  return Op;
}

SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT();
  unsigned Reg = 0;
  unsigned size = 0;
  switch (T) {
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    if (Subtarget->is64Bit()) {
      Reg = X86::RAX; size = 8;
    } else // Should go away when LowerType stuff lands
      return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0);
    break;
  }
  SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg,
                                    Op.getOperand(3), SDOperand());
  SDOperand Ops[] = { cpIn.getValue(0),
                      Op.getOperand(1),
                      Op.getOperand(2),
                      DAG.getTargetConstant(size, MVT::i8),
                      cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5);
  SDOperand cpOut =
    DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1));
  return cpOut;
}

SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
  MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT();
  assert (T == MVT::i64 && "Only know how to expand i64 CAS");
  SDOperand cpInL, cpInH;
  cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
                      DAG.getConstant(0,
                                      MVT::i32));
  cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
                      DAG.getConstant(1, MVT::i32));
  cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX,
                           cpInL, SDOperand());
  cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX,
                           cpInH, cpInL.getValue(1));
  SDOperand swapInL, swapInH;
  swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2),
                        DAG.getConstant(0, MVT::i32));
  swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2),
                        DAG.getConstant(1, MVT::i32));
  swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX,
                             swapInL, cpInH.getValue(1));
  swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX,
                             swapInH, swapInL.getValue(1));
  SDOperand Ops[] = { swapInH.getValue(0),
                      Op->getOperand(1),
                      swapInH.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3);
  SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32,
                                        Result.getValue(1));
  SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32,
                                        cpOutL.getValue(2));
  SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0) };
  SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2);
  Tys = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val;
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::ATOMIC_LCS:         return LowerLCS(Op, DAG);
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::FABS:               return LowerFABS(Op, DAG);
  case ISD::FNEG:               return LowerFNEG(Op, DAG);
  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
                                return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:               return LowerCTLZ(Op, DAG);
  case ISD::CTTZ:               return LowerCTTZ(Op, DAG);

  // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
  case ISD::READCYCLECOUNTER:
    return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
  }
}

/// ExpandOperationResult - Provide custom lowering hooks for expanding
/// operations.
SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::FP_TO_SINT:       return ExpandFP_TO_SINT(N, DAG);
  case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
  case ISD::ATOMIC_LCS:       return ExpandATOMIC_LCS(N, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
    return false;

  if (AM.BaseGV) {
    // We can only fold this if we don't need an extra load.
    if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
      return false;

    // X86-64 only supports addr of globals in small code model.
    if (Subtarget->is64Bit()) {
      if (getTargetMachine().getCodeModel() != CodeModel::Small)
        return false;
      // If lower 4G is not available, then we must use rip-relative addressing.
      if (AM.BaseOffs || AM.Scale > 1)
        return false;
    }
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}


bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
  if (!Ty1->isInteger() || !Ty2->isInteger())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}

bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
                                       MVT::ValueType VT2) const {
  if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
    return false;
  unsigned NumBits1 = MVT::getSizeInBits(VT1);
  unsigned NumBits2 = MVT::getSizeInBits(VT2);
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() <= 4 ||
          isIdentityMask(Mask.Val) ||
          isIdentityMask(Mask.Val, true) ||
          isSplatMask(Mask.Val) ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKH_v_undef_Mask(Mask.Val));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
                                          MVT::ValueType EVT,
                                          SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(&BVOps[0], 4) ||
            isCommutedMOVL(&BVOps[0], 4, true) ||
            isSHUFPMask(&BVOps[0], 4) ||
            isCommutedSHUFP(&BVOps[0], 4));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
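    // Background (informal): bits 11:10 of the x87 control word hold the
    // rounding-control field; the 0xC7F written below forces that field to
    // 11b ("round towards zero"), matching C's truncating FP-to-int
    // semantics, while keeping all FP exceptions masked.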
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);

    // ... and store a control word with the rounding mode set to
    // "round towards zero"...
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
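  // (X86ISD::SETCC produces a 0/1 value in an i8, so every bit above the low
  // bit is known to be zero; the case below records exactly that fact.)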
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(), DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset = 0;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  // DAG combine handles the stack object case.
  return false;
}


/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT)/8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  LoadSDNode *LD = cast<LoadSDNode>(Base);
  if (isAlign16) {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  } else {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile(),
                       LD->getAlignment());
  }
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE2 support, try to form min/max nodes.
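  // e.g. (sketch): "x < y ? x : y" on f32 reaches here as
  // select(setolt(x, y), x, y) and is turned into X86ISD::FMIN below, which
  // selects to minss (minsd for f64).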
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }

  }

  return SDOperand();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
  if (MVT::isVector(St->getValue().getValueType()) &&
      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().Val;
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDOperand, 8> Ops;
    SDNode* ChainVal = St->getChain().Val;
    // Must be a store of a load.  We currently handle two cases:  the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).Val == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
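      // i.e. the copy becomes roughly "movq (%src), %rax; movq %rax, (%dst)"
      // (sketch): the value stays in an integer register and never touches
      // the MMX register file, so no emms is needed around it.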
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewChain);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(),
                                    St->getSrcValueOffset()+4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
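/// Like FOR/FXOR above, X86ISD::FAND operates on the floating-point bit
/// pattern, and +0.0 is the all-zero pattern, so AND-ing with +0.0 always
/// yields +0.0. The folds below check isPosZero() deliberately: -0.0 has its
/// sign bit set, so FAND with -0.0 is not a constant result and is left alone.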
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}


SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'f':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'y':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// lowerXConstraint - Try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
                                         std::string &s) const {
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      s = "Y";
    else if (Subtarget->hasSSE1())
      s = "x";
    else
      s = "f";
  } else
    return TargetLowering::lowerXConstraint(ConstraintVT, s);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':   // Constant in the range [0, 31], e.g. a 32-bit shift amount.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':   // Constant in the range [0, 255], e.g. an in/out port number.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
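    // e.g. asm("movl %0, %%eax" : : "i"(42)) -- the operand prints as the
    // immediate $42 in the emitted assembly.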
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // In non-pic codegen mode, we allow the address of a global (with an
    // optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C).
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the operands the other way around: (C+GA).
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;               // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'f':   // FP Stack registers.
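      // The x87 stack is modeled by the RFP32/RFP64/RFP80 register classes.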
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
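  // For example, "{ax}" with VT==MVT::i32 resolves to (AX, GR16) above and
  // is rewritten to (EAX, GR32) by the switches below.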
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}