X86ISelLowering.cpp revision 25210da75746362badd623be54944439b2e64dbf
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

// Forward declarations.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG);

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
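
  // A quick reference for the action values used throughout this constructor:
  // Legal means the target handles the node natively, Promote means it is
  // handled in a wider type, Expand asks the legalizer to break it into other
  // operations (or a libcall), and Custom routes the node to this target's
  // LowerOperation hook.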

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // Expand certain atomics
  setOperationAction(ISD::ATOMIC_LCS, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LSS, MVT::i32, Expand);
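
  // (In this ISD naming scheme, ATOMIC_LCS is the atomic compare-and-swap
  // node; the Custom entries above send it through X86-specific lowering,
  // which maps onto the LOCK CMPXCHG family of instructions.)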

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2); // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType(ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}


MVT::ValueType
X86TargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);
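
  // An ISD::RET node carries the chain as operand 0 followed by two operands
  // per returned value, of which only the first (operand i*2+1) is the value
  // itself; that is why the operand count asserted above is odd and why the
  // values are read below as operand i*2+1.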

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  SmallVector<SDOperand, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDOperand ValToCopy = Op.getOperand(i*2+1);

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (RVLocs[i].getLocReg() == X86::ST0 ||
        RVLocs[i].getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax.
  if (Subtarget->is64Bit() &&
      DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
      FuncInfo->setSRetReturnReg(Reg);
    }
    SDOperand Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy());

    Chain = DAG.getCopyToReg(Chain, X86::RAX, Val, Flag);
    Flag = Chain.getValue(1);
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.Val)
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size());
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes
/// that Chain/InFlag are the input chain/flag to use, and that TheCall is the
/// call being lowered. This returns an SDNode with the same number of values
/// as the ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    MVT::ValueType CopyVT = RVLocs[i].getValVT();

    // If this is a call to a function that returns an fp value on the floating
    // point stack, but where we prefer to use the value in xmm registers, copy
    // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if (RVLocs[i].getLocReg() == X86::ST0 &&
        isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
      CopyVT = MVT::f80;
    }

    Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDOperand Val = Chain.getValue(0);
    InFlag = Chain.getValue(2);

    if (CopyVT != RVLocs[i].getValVT()) {
      // Round the F80 to the right size, which also moves it to the
      // appropriate xmm register.
      Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));
    }

    ResultVals.push_back(Val);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall seems to be the standard calling convention for many Windows API
//  routines. It differs from the C calling convention just a little: the
//  callee should clean up the stack, not the caller. Symbols should also be
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention (tail call), see its
//  implementation in LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}
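
// A note on the ISD::CALL operand layout assumed below: operand 0 is the
// chain, 1 the calling convention, 2 the vararg flag, 3 the tail-call flag
// and 4 the callee; the actual arguments follow as (value, argument-flags)
// pairs. Hence argument i lives at operand 5+2*i, its flags at 6+2*i, and
// CallIsStructReturn below counts (NumOperands - 5) / 2 arguments.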

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(6))->getArgFlags().isSRet();
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses
/// struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else {
      if (CC == CallingConv::Fast && PerformTailCallOpt)
        return CC_X86_64_TailCall;
      else
        return CC_X86_64_C;
    }
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}


/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// to be in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
    getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
    Subtarget->isPICStyleGOT();
}

/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
    getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
    Subtarget->isPICStyleGOT();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
  SDOperand SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*AlwaysInline=*/true, NULL, 0, NULL, 0);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis.
  // In case of tail call optimization, mark all arguments mutable, since they
  // could be overwritten by the lowering of arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (Flags.isByVal())
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isTargetWin64();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 128)
        RC = X86::VR128RegisterClass;
      else if (MVT::isVector(RegVT)) {
        assert(MVT::getSizeInBits(RegVT) == 64);
        if (!Is64Bit)
          RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
        else {
          // Darwin calling convention passes MMX values in either GPRs or
          // XMMs in x86-64. Other targets pass them in memory.
          if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
            RC = X86::VR128RegisterClass; // MMX values are passed in XMMs.
            RegVT = MVT::v2i64;
          } else {
            RC = X86::GR64RegisterClass; // v1i64 values are passed in GPRs.
            RegVT = MVT::i64;
          }
        }
      } else {
        assert(0 && "Unknown argument type!");
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT()) {
        if (MVT::getSizeInBits(RegVT) == 64 && RC == X86::GR64RegisterClass)
          ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
        else if (RC == X86::VR128RegisterClass) {
          ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue,
                                 DAG.getConstant(0, MVT::i64));
          ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
        }
      }

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. Save the argument into
  // a virtual register so that we can access it from the return points.
  if (Is64Bit && DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
      FuncInfo->setSRetReturnReg(Reg);
    }
    SDOperand Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]);
    Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root);
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;

      // FIXME: We should really autogenerate these arrays
      static const unsigned GPR64ArgRegsWin64[] = {
        X86::RCX, X86::RDX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegsWin64[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
      };
      static const unsigned GPR64ArgRegs64Bit[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs64Bit[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };
      const unsigned *GPR64ArgRegs, *XMMArgRegs;

      if (IsWin64) {
        TotalNumIntRegs = 4; TotalNumXMMRegs = 4;
        GPR64ArgRegs = GPR64ArgRegsWin64;
        XMMArgRegs = XMMArgRegsWin64;
      } else {
        TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
        GPR64ArgRegs = GPR64ArgRegs64Bit;
        XMMArgRegs = XMMArgRegs64Bit;
      }
      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
                                                       TotalNumIntRegs);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
                                                       TotalNumXMMRegs);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = TotalNumIntRegs * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(TotalNumIntRegs * 8 +
                                                 TotalNumXMMRegs * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }
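
      // The register save area created above is laid out as TotalNumIntRegs*8
      // bytes of GPR slots followed by TotalNumXMMRegs*16 bytes of XMM slots;
      // VarArgsGPOffset and VarArgsFPOffset record where the first
      // still-unallocated slot of each class sits, for use when va_start is
      // lowered.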

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->getArgFlags();
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

/// EmitTailCallLoadRetAddr - Emit a load of the return address if tail call
/// optimization is performed and it is required.
SDOperand
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                           SDOperand &OutRetAddr,
                                           SDOperand Chain,
                                           bool IsTailCall,
                                           bool Is64Bit,
                                           int FPDiff) {
  if (!IsTailCall || FPDiff == 0) return Chain;

  // Adjust the Return address stack slot.
  MVT::ValueType VT = getPointerTy();
  OutRetAddr = getReturnAddressFrameIndex(DAG);
  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, Chain, OutRetAddr, NULL, 0);
  return SDOperand(OutRetAddr.Val, 1);
}
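
// FPDiff, as used by the tail-call helpers here, is the difference between
// the byte count the caller pops on return and the byte count this call
// pushes (see LowerCALL below); when it is nonzero, the return address has to
// be reloaded and stored into a newly created stack slot.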
1391static SDOperand 1392EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 1393 SDOperand Chain, SDOperand RetAddrFrIdx, 1394 bool Is64Bit, int FPDiff) { 1395 // Store the return address to the appropriate stack slot. 1396 if (!FPDiff) return Chain; 1397 // Calculate the new stack slot for the return address. 1398 int SlotSize = Is64Bit ? 8 : 4; 1399 int NewReturnAddrFI = 1400 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1401 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1402 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1403 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, 1404 PseudoSourceValue::getFixedStack(), NewReturnAddrFI); 1405 return Chain; 1406} 1407 1408SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 1409 MachineFunction &MF = DAG.getMachineFunction(); 1410 SDOperand Chain = Op.getOperand(0); 1411 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 1412 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1413 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 1414 && CC == CallingConv::Fast && PerformTailCallOpt; 1415 SDOperand Callee = Op.getOperand(4); 1416 bool Is64Bit = Subtarget->is64Bit(); 1417 bool IsStructRet = CallIsStructReturn(Op); 1418 1419 assert(!(isVarArg && CC == CallingConv::Fast) && 1420 "Var args not supported with calling convention fastcc"); 1421 1422 // Analyze operands of the call, assigning locations to each operand. 1423 SmallVector<CCValAssign, 16> ArgLocs; 1424 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1425 CCInfo.AnalyzeCallOperands(Op.Val, CCAssignFnForNode(Op)); 1426 1427 // Get a count of how many bytes are to be pushed on the stack. 1428 unsigned NumBytes = CCInfo.getNextStackOffset(); 1429 if (CC == CallingConv::Fast) 1430 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1431 1432 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1433 // arguments and the arguments after the retaddr has been pushed are aligned. 1434 if (!Is64Bit && CC == CallingConv::X86_FastCall && 1435 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() && 1436 (NumBytes & 7) == 0) 1437 NumBytes += 4; 1438 1439 int FPDiff = 0; 1440 if (IsTailCall) { 1441 // Lower arguments at fp - stackoffset + fpdiff. 1442 unsigned NumBytesCallerPushed = 1443 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1444 FPDiff = NumBytesCallerPushed - NumBytes; 1445 1446 // Set the delta of movement of the returnaddr stackslot. 1447 // But only set if delta is greater than previous delta. 1448 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1449 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1450 } 1451 1452 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); 1453 1454 SDOperand RetAddrFrIdx; 1455 // Load return adress for tail calls. 1456 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit, 1457 FPDiff); 1458 1459 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1460 SmallVector<SDOperand, 8> MemOpChains; 1461 SDOperand StackPtr; 1462 1463 // Walk the register/memloc assignments, inserting copies/loads. In the case 1464 // of tail call optimization arguments are handle later. 
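  // Operand layout used throughout LowerCALL (a summary of the getOperand()
  // indices appearing below):
  //   0: chain    1: calling convention    2: isVarArg    3: isTailCall
  //   4: callee   5+2*i: value of argument i   6+2*i: its ISD::ArgFlagsTy node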
1465 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1466 CCValAssign &VA = ArgLocs[i]; 1467 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1468 bool isByVal = cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))-> 1469 getArgFlags().isByVal(); 1470 1471 // Promote the value if needed. 1472 switch (VA.getLocInfo()) { 1473 default: assert(0 && "Unknown loc info!"); 1474 case CCValAssign::Full: break; 1475 case CCValAssign::SExt: 1476 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1477 break; 1478 case CCValAssign::ZExt: 1479 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1480 break; 1481 case CCValAssign::AExt: 1482 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1483 break; 1484 } 1485 1486 if (VA.isRegLoc()) { 1487 if (Is64Bit) { 1488 MVT::ValueType RegVT = VA.getLocVT(); 1489 if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64) 1490 switch (VA.getLocReg()) { 1491 default: 1492 break; 1493 case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX: 1494 case X86::R8: { 1495 // Special case: passing MMX values in GPR registers. 1496 Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg); 1497 break; 1498 } 1499 case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3: 1500 case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: { 1501 // Special case: passing MMX values in XMM registers. 1502 Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg); 1503 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg); 1504 Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 1505 DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg, 1506 getMOVLMask(2, DAG)); 1507 break; 1508 } 1509 } 1510 } 1511 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1512 } else { 1513 if (!IsTailCall || (IsTailCall && isByVal)) { 1514 assert(VA.isMemLoc()); 1515 if (StackPtr.Val == 0) 1516 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1517 1518 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1519 Arg)); 1520 } 1521 } 1522 } 1523 1524 if (!MemOpChains.empty()) 1525 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1526 &MemOpChains[0], MemOpChains.size()); 1527 1528 // Build a sequence of copy-to-reg nodes chained together with token chain 1529 // and flag operands which copy the outgoing args into registers. 1530 SDOperand InFlag; 1531 // Tail call byval lowering might overwrite argument registers so in case of 1532 // tail call optimization the copies to registers are lowered later. 1533 if (!IsTailCall) 1534 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1535 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1536 InFlag); 1537 InFlag = Chain.getValue(1); 1538 } 1539 1540 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1541 // GOT pointer. 1542 if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) { 1543 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1544 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1545 InFlag); 1546 InFlag = Chain.getValue(1); 1547 } 1548 // If we are tail calling and generating PIC/GOT style code load the address 1549 // of the callee into ecx. The value in ecx is used as target of the tail 1550 // jump. This is done to circumvent the ebx/callee-saved problem for tail 1551 // calls on PIC/GOT architectures. Normally we would just put the address of 1552 // GOT into ebx and then call target@PLT. But for tail callss ebx would be 1553 // restored (since ebx is callee saved) before jumping to the target@PLT. 
1554 if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) { 1555 // Note: The actual moving to ecx is done further down. 1556 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 1557 if (G && !G->getGlobal()->hasHiddenVisibility() && 1558 !G->getGlobal()->hasProtectedVisibility()) 1559 Callee = LowerGlobalAddress(Callee, DAG); 1560 else if (isa<ExternalSymbolSDNode>(Callee)) 1561 Callee = LowerExternalSymbol(Callee,DAG); 1562 } 1563 1564 if (Is64Bit && isVarArg) { 1565 // From AMD64 ABI document: 1566 // For calls that may call functions that use varargs or stdargs 1567 // (prototype-less calls or calls to functions containing ellipsis (...) in 1568 // the declaration) %al is used as hidden argument to specify the number 1569 // of SSE registers used. The contents of %al do not need to match exactly 1570 // the number of registers, but must be an ubound on the number of SSE 1571 // registers used and is in the range 0 - 8 inclusive. 1572 1573 // FIXME: Verify this on Win64 1574 // Count the number of XMM registers allocated. 1575 static const unsigned XMMArgRegs[] = { 1576 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1577 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1578 }; 1579 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1580 1581 Chain = DAG.getCopyToReg(Chain, X86::AL, 1582 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1583 InFlag = Chain.getValue(1); 1584 } 1585 1586 1587 // For tail calls lower the arguments to the 'real' stack slot. 1588 if (IsTailCall) { 1589 SmallVector<SDOperand, 8> MemOpChains2; 1590 SDOperand FIN; 1591 int FI = 0; 1592 // Do not flag preceeding copytoreg stuff together with the following stuff. 1593 InFlag = SDOperand(); 1594 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1595 CCValAssign &VA = ArgLocs[i]; 1596 if (!VA.isRegLoc()) { 1597 assert(VA.isMemLoc()); 1598 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1599 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1600 ISD::ArgFlagsTy Flags = 1601 cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags(); 1602 // Create frame index. 1603 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1604 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1605 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1606 FIN = DAG.getFrameIndex(FI, getPointerTy()); 1607 1608 if (Flags.isByVal()) { 1609 // Copy relative to framepointer. 1610 SDOperand Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 1611 if (StackPtr.Val == 0) 1612 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1613 Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source); 1614 1615 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain, 1616 Flags, DAG)); 1617 } else { 1618 // Store relative to framepointer. 1619 MemOpChains2.push_back( 1620 DAG.getStore(Chain, Arg, FIN, 1621 PseudoSourceValue::getFixedStack(), FI)); 1622 } 1623 } 1624 } 1625 1626 if (!MemOpChains2.empty()) 1627 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1628 &MemOpChains2[0], MemOpChains2.size()); 1629 1630 // Copy arguments to their registers. 1631 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1632 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1633 InFlag); 1634 InFlag = Chain.getValue(1); 1635 } 1636 InFlag =SDOperand(); 1637 1638 // Store the return address to the appropriate stack slot. 
1639 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, 1640 FPDiff); 1641 } 1642 1643 // If the callee is a GlobalAddress node (quite common, every direct call is) 1644 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1645 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1646 // We should use extra load for direct calls to dllimported functions in 1647 // non-JIT mode. 1648 if ((IsTailCall || !Is64Bit || 1649 getTargetMachine().getCodeModel() != CodeModel::Large) 1650 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1651 getTargetMachine(), true)) 1652 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1653 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1654 if (IsTailCall || !Is64Bit || 1655 getTargetMachine().getCodeModel() != CodeModel::Large) 1656 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1657 } else if (IsTailCall) { 1658 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1659 1660 Chain = DAG.getCopyToReg(Chain, 1661 DAG.getRegister(Opc, getPointerTy()), 1662 Callee,InFlag); 1663 Callee = DAG.getRegister(Opc, getPointerTy()); 1664 // Add register as live out. 1665 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1666 } 1667 1668 // Returns a chain & a flag for retval copy to use. 1669 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1670 SmallVector<SDOperand, 8> Ops; 1671 1672 if (IsTailCall) { 1673 Ops.push_back(Chain); 1674 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1675 Ops.push_back(DAG.getIntPtrConstant(0)); 1676 if (InFlag.Val) 1677 Ops.push_back(InFlag); 1678 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1679 InFlag = Chain.getValue(1); 1680 1681 // Returns a chain & a flag for retval copy to use. 1682 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1683 Ops.clear(); 1684 } 1685 1686 Ops.push_back(Chain); 1687 Ops.push_back(Callee); 1688 1689 if (IsTailCall) 1690 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1691 1692 // Add argument registers to the end of the list so that they are known live 1693 // into the call. 1694 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1695 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1696 RegsToPass[i].second.getValueType())); 1697 1698 // Add an implicit use GOT pointer in EBX. 1699 if (!IsTailCall && !Is64Bit && 1700 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1701 Subtarget->isPICStyleGOT()) 1702 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1703 1704 // Add an implicit use of AL for x86 vararg functions. 1705 if (Is64Bit && isVarArg) 1706 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); 1707 1708 if (InFlag.Val) 1709 Ops.push_back(InFlag); 1710 1711 if (IsTailCall) { 1712 assert(InFlag.Val && 1713 "Flag must be set. Depend on flag being set in LowerRET"); 1714 Chain = DAG.getNode(X86ISD::TAILCALL, 1715 Op.Val->getVTList(), &Ops[0], Ops.size()); 1716 1717 return SDOperand(Chain.Val, Op.ResNo); 1718 } 1719 1720 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1721 InFlag = Chain.getValue(1); 1722 1723 // Create the CALLSEQ_END node. 1724 unsigned NumBytesForCalleeToPush; 1725 if (IsCalleePop(Op)) 1726 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 1727 else if (!Is64Bit && IsStructRet) 1728 // If this is is a call to a struct-return function, the callee 1729 // pops the hidden struct pointer, so we have to push it back. 
1730      // This is common for Darwin/X86, Linux & Mingw32 targets.
1731      NumBytesForCalleeToPush = 4;
1732    else
1733      NumBytesForCalleeToPush = 0;  // Callee pops nothing.
1734
1735    // Returns a flag for retval copy to use.
1736    Chain = DAG.getCALLSEQ_END(Chain,
1737                               DAG.getIntPtrConstant(NumBytes),
1738                               DAG.getIntPtrConstant(NumBytesForCalleeToPush),
1739                               InFlag);
1740    InFlag = Chain.getValue(1);
1741
1742    // Handle result values, copying them out of physregs into vregs that we
1743    // return.
1744    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1745  }
1746
1747
1748  //===----------------------------------------------------------------------===//
1749  // Fast Calling Convention (tail call) implementation
1750  //===----------------------------------------------------------------------===//
1751
1752  // Like a std call the callee cleans up the arguments, except that ECX is
1753  // reserved for storing the tail called function address. Only 2 registers are
1754  // free for argument passing (inreg). Tail call optimization is performed
1755  // provided:
1756  //  * tailcallopt is enabled
1757  //  * caller/callee are fastcc
1758  // On the X86_64 architecture with GOT-style position independent code, only
1759  // local (within module) calls are supported at the moment.
1760  // To keep the stack aligned according to the platform ABI, the function
1761  // GetAlignedArgumentStackSize ensures that the argument delta is always a
1762  // multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld,
1763  // for example.) If a tail called callee has more arguments than the caller,
1764  // the caller needs to make sure that there is room to move the RETADDR to.
1765  // This is achieved by reserving an area the size of the argument delta right
1766  // after the original RETADDR, but before the saved frame pointer or the
1767  // spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1768  // stack layout:
1769  //    arg1
1770  //    arg2
1771  //    RETADDR
1772  //    [ new RETADDR
1773  //      move area ]
1774  //    (possible EBP)
1775  //    ESI
1776  //    EDI
1777  //    local1 ..
1778
1779  /// GetAlignedArgumentStackSize - Round up the stack size so that it is, e.g.,
1780  /// 16n + 12 for a 16 byte alignment requirement with a 4 byte return slot.
1781  unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1782                                                          SelectionDAG& DAG) {
1783    if (PerformTailCallOpt) {
1784      MachineFunction &MF = DAG.getMachineFunction();
1785      const TargetMachine &TM = MF.getTarget();
1786      const TargetFrameInfo &TFI = *TM.getFrameInfo();
1787      unsigned StackAlignment = TFI.getStackAlignment();
1788      uint64_t AlignMask = StackAlignment - 1;
1789      int64_t Offset = StackSize;
1790      unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1791      if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
1792        // Low bits are at most the target residue (e.g. 12); just add the difference.
1793        Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1794      } else {
1795        // Mask out the lower bits, then add one full stack alignment plus the residue.
1796        Offset = ((~AlignMask) & Offset) + StackAlignment +
1797                 (StackAlignment-SlotSize);
1798      }
1799      StackSize = Offset;
1800    }
1801    return StackSize;
1802  }
1803
1804  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
1805  /// tail call optimization. A call is eligible if the caller/callee calling
1806  /// conventions match (currently only fastcc supports tail calls) and the
1807  /// CALL is immediately followed by a RET.
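  /// A minimal illustrative IR shape that passes these checks when -tailcallopt
  /// is enabled (a sketch; the function names are placeholders):
  ///   define fastcc void @caller() {
  ///     tail call fastcc void @callee()
  ///     ret void
  ///   }
  /// Both conventions are fastcc and the CALL node is immediately followed by
  /// the RET, so CheckTailCallReturnConstraints and the checks below succeed.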
1808bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, 1809 SDOperand Ret, 1810 SelectionDAG& DAG) const { 1811 if (!PerformTailCallOpt) 1812 return false; 1813 1814 if (CheckTailCallReturnConstraints(Call, Ret)) { 1815 MachineFunction &MF = DAG.getMachineFunction(); 1816 unsigned CallerCC = MF.getFunction()->getCallingConv(); 1817 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue(); 1818 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 1819 SDOperand Callee = Call.getOperand(4); 1820 // On x86/32Bit PIC/GOT tail calls are supported. 1821 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ || 1822 !Subtarget->isPICStyleGOT()|| !Subtarget->is64Bit()) 1823 return true; 1824 1825 // Can only do local tail calls (in same module, hidden or protected) on 1826 // x86_64 PIC/GOT at the moment. 1827 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1828 return G->getGlobal()->hasHiddenVisibility() 1829 || G->getGlobal()->hasProtectedVisibility(); 1830 } 1831 } 1832 1833 return false; 1834} 1835 1836//===----------------------------------------------------------------------===// 1837// Other Lowering Hooks 1838//===----------------------------------------------------------------------===// 1839 1840 1841SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 1842 MachineFunction &MF = DAG.getMachineFunction(); 1843 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1844 int ReturnAddrIndex = FuncInfo->getRAIndex(); 1845 1846 if (ReturnAddrIndex == 0) { 1847 // Set up a frame object for the return address. 1848 if (Subtarget->is64Bit()) 1849 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8); 1850 else 1851 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4); 1852 1853 FuncInfo->setRAIndex(ReturnAddrIndex); 1854 } 1855 1856 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 1857} 1858 1859 1860 1861/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86 1862/// specific condition code. It returns a false if it cannot do a direct 1863/// translation. X86CC is the translated CondCode. LHS/RHS are modified as 1864/// needed. 1865static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 1866 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, 1867 SelectionDAG &DAG) { 1868 X86CC = X86::COND_INVALID; 1869 if (!isFP) { 1870 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 1871 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 1872 // X > -1 -> X == 0, jump !sign. 1873 RHS = DAG.getConstant(0, RHS.getValueType()); 1874 X86CC = X86::COND_NS; 1875 return true; 1876 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 1877 // X < 0 -> X == 0, jump on sign. 
1878          X86CC = X86::COND_S;
1879          return true;
1880        } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1881          // X < 1 -> X <= 0
1882          RHS = DAG.getConstant(0, RHS.getValueType());
1883          X86CC = X86::COND_LE;
1884          return true;
1885        }
1886      }
1887
1888      switch (SetCCOpcode) {
1889      default: break;
1890      case ISD::SETEQ: X86CC = X86::COND_E; break;
1891      case ISD::SETGT: X86CC = X86::COND_G; break;
1892      case ISD::SETGE: X86CC = X86::COND_GE; break;
1893      case ISD::SETLT: X86CC = X86::COND_L; break;
1894      case ISD::SETLE: X86CC = X86::COND_LE; break;
1895      case ISD::SETNE: X86CC = X86::COND_NE; break;
1896      case ISD::SETULT: X86CC = X86::COND_B; break;
1897      case ISD::SETUGT: X86CC = X86::COND_A; break;
1898      case ISD::SETULE: X86CC = X86::COND_BE; break;
1899      case ISD::SETUGE: X86CC = X86::COND_AE; break;
1900      }
1901    } else {
1902      // On a floating point condition, the flags are set as follows:
1903      //   ZF | PF | CF | op
1904      //    0 |  0 |  0 | X > Y
1905      //    0 |  0 |  1 | X < Y
1906      //    1 |  0 |  0 | X == Y
1907      //    1 |  1 |  1 | unordered
1908      bool Flip = false;
1909      switch (SetCCOpcode) {
1910      default: break;
1911      case ISD::SETUEQ:
1912      case ISD::SETEQ: X86CC = X86::COND_E; break;
1913      case ISD::SETOLT: Flip = true; // Fallthrough
1914      case ISD::SETOGT:
1915      case ISD::SETGT: X86CC = X86::COND_A; break;
1916      case ISD::SETOLE: Flip = true; // Fallthrough
1917      case ISD::SETOGE:
1918      case ISD::SETGE: X86CC = X86::COND_AE; break;
1919      case ISD::SETUGT: Flip = true; // Fallthrough
1920      case ISD::SETULT:
1921      case ISD::SETLT: X86CC = X86::COND_B; break;
1922      case ISD::SETUGE: Flip = true; // Fallthrough
1923      case ISD::SETULE:
1924      case ISD::SETLE: X86CC = X86::COND_BE; break;
1925      case ISD::SETONE:
1926      case ISD::SETNE: X86CC = X86::COND_NE; break;
1927      case ISD::SETUO: X86CC = X86::COND_P; break;
1928      case ISD::SETO:  X86CC = X86::COND_NP; break;
1929      }
1930      if (Flip)
1931        std::swap(LHS, RHS);
1932    }
1933
1934    return X86CC != X86::COND_INVALID;
1935  }
1936
1937  /// hasFPCMov - is there a floating point cmov for the specific X86 condition
1938  /// code. The current x86 ISA includes the following FP cmov instructions:
1939  /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1940  static bool hasFPCMov(unsigned X86CC) {
1941    switch (X86CC) {
1942    default:
1943      return false;
1944    case X86::COND_B:
1945    case X86::COND_BE:
1946    case X86::COND_E:
1947    case X86::COND_P:
1948    case X86::COND_A:
1949    case X86::COND_AE:
1950    case X86::COND_NE:
1951    case X86::COND_NP:
1952      return true;
1953    }
1954  }
1955
1956  /// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
1957  /// true if Op is undef or if its value falls within the half-open range [Low, Hi).
1958  static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
1959    if (Op.getOpcode() == ISD::UNDEF)
1960      return true;
1961
1962    unsigned Val = cast<ConstantSDNode>(Op)->getValue();
1963    return (Val >= Low && Val < Hi);
1964  }
1965
1966  /// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
1967  /// true if Op is undef or if its value is equal to the specified value.
1968  static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
1969    if (Op.getOpcode() == ISD::UNDEF)
1970      return true;
1971    return cast<ConstantSDNode>(Op)->getValue() == Val;
1972  }
1973
1974  /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1975  /// specifies a shuffle of elements that is suitable for input to PSHUFD.
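  /// For example, the 4-element masks <2,1,0,3> and <0,0,0,0> qualify (every
  /// defined element indexes the first operand), while <0,1,4,5> does not since
  /// elements 4 and 5 would reference the second operand. (Illustrative values
  /// only.)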
1976bool X86::isPSHUFDMask(SDNode *N) { 1977 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1978 1979 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 1980 return false; 1981 1982 // Check if the value doesn't reference the second vector. 1983 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1984 SDOperand Arg = N->getOperand(i); 1985 if (Arg.getOpcode() == ISD::UNDEF) continue; 1986 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1987 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 1988 return false; 1989 } 1990 1991 return true; 1992} 1993 1994/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1995/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1996bool X86::isPSHUFHWMask(SDNode *N) { 1997 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1998 1999 if (N->getNumOperands() != 8) 2000 return false; 2001 2002 // Lower quadword copied in order. 2003 for (unsigned i = 0; i != 4; ++i) { 2004 SDOperand Arg = N->getOperand(i); 2005 if (Arg.getOpcode() == ISD::UNDEF) continue; 2006 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2007 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2008 return false; 2009 } 2010 2011 // Upper quadword shuffled. 2012 for (unsigned i = 4; i != 8; ++i) { 2013 SDOperand Arg = N->getOperand(i); 2014 if (Arg.getOpcode() == ISD::UNDEF) continue; 2015 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2016 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2017 if (Val < 4 || Val > 7) 2018 return false; 2019 } 2020 2021 return true; 2022} 2023 2024/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2025/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2026bool X86::isPSHUFLWMask(SDNode *N) { 2027 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2028 2029 if (N->getNumOperands() != 8) 2030 return false; 2031 2032 // Upper quadword copied in order. 2033 for (unsigned i = 4; i != 8; ++i) 2034 if (!isUndefOrEqual(N->getOperand(i), i)) 2035 return false; 2036 2037 // Lower quadword shuffled. 2038 for (unsigned i = 0; i != 4; ++i) 2039 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2040 return false; 2041 2042 return true; 2043} 2044 2045/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2046/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2047static bool isSHUFPMask(SDOperandPtr Elems, unsigned NumElems) { 2048 if (NumElems != 2 && NumElems != 4) return false; 2049 2050 unsigned Half = NumElems / 2; 2051 for (unsigned i = 0; i < Half; ++i) 2052 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2053 return false; 2054 for (unsigned i = Half; i < NumElems; ++i) 2055 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2056 return false; 2057 2058 return true; 2059} 2060 2061bool X86::isSHUFPMask(SDNode *N) { 2062 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2063 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2064} 2065 2066/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2067/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2068/// half elements to come from vector 1 (which would equal the dest.) and 2069/// the upper half to come from vector 2. 
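/// For example, with 4 elements <5,7,0,2> is a commuted SHUFP mask (the low
/// half indexes V2, the high half indexes V1); CommuteVectorShuffle below
/// turns it into the SHUFP-friendly <1,3,4,6>. (Illustrative values only.)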
2070static bool isCommutedSHUFP(SDOperandPtr Ops, unsigned NumOps) { 2071 if (NumOps != 2 && NumOps != 4) return false; 2072 2073 unsigned Half = NumOps / 2; 2074 for (unsigned i = 0; i < Half; ++i) 2075 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2076 return false; 2077 for (unsigned i = Half; i < NumOps; ++i) 2078 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2079 return false; 2080 return true; 2081} 2082 2083static bool isCommutedSHUFP(SDNode *N) { 2084 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2085 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2086} 2087 2088/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2089/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2090bool X86::isMOVHLPSMask(SDNode *N) { 2091 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2092 2093 if (N->getNumOperands() != 4) 2094 return false; 2095 2096 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2097 return isUndefOrEqual(N->getOperand(0), 6) && 2098 isUndefOrEqual(N->getOperand(1), 7) && 2099 isUndefOrEqual(N->getOperand(2), 2) && 2100 isUndefOrEqual(N->getOperand(3), 3); 2101} 2102 2103/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2104/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2105/// <2, 3, 2, 3> 2106bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2107 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2108 2109 if (N->getNumOperands() != 4) 2110 return false; 2111 2112 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2113 return isUndefOrEqual(N->getOperand(0), 2) && 2114 isUndefOrEqual(N->getOperand(1), 3) && 2115 isUndefOrEqual(N->getOperand(2), 2) && 2116 isUndefOrEqual(N->getOperand(3), 3); 2117} 2118 2119/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2120/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2121bool X86::isMOVLPMask(SDNode *N) { 2122 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2123 2124 unsigned NumElems = N->getNumOperands(); 2125 if (NumElems != 2 && NumElems != 4) 2126 return false; 2127 2128 for (unsigned i = 0; i < NumElems/2; ++i) 2129 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2130 return false; 2131 2132 for (unsigned i = NumElems/2; i < NumElems; ++i) 2133 if (!isUndefOrEqual(N->getOperand(i), i)) 2134 return false; 2135 2136 return true; 2137} 2138 2139/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2140/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2141/// and MOVLHPS. 2142bool X86::isMOVHPMask(SDNode *N) { 2143 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2144 2145 unsigned NumElems = N->getNumOperands(); 2146 if (NumElems != 2 && NumElems != 4) 2147 return false; 2148 2149 for (unsigned i = 0; i < NumElems/2; ++i) 2150 if (!isUndefOrEqual(N->getOperand(i), i)) 2151 return false; 2152 2153 for (unsigned i = 0; i < NumElems/2; ++i) { 2154 SDOperand Arg = N->getOperand(i + NumElems/2); 2155 if (!isUndefOrEqual(Arg, i + NumElems)) 2156 return false; 2157 } 2158 2159 return true; 2160} 2161 2162/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2163/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
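/// For example, the canonical 4-element UNPCKL mask is <0,4,1,5>: even
/// positions take elements 0..NumElts/2-1 of V1 and the following odd
/// positions take the corresponding elements of V2. Undef entries are
/// accepted. (Illustrative values only.)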
2164bool static isUNPCKLMask(SDOperandPtr Elts, unsigned NumElts, 2165 bool V2IsSplat = false) { 2166 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2167 return false; 2168 2169 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2170 SDOperand BitI = Elts[i]; 2171 SDOperand BitI1 = Elts[i+1]; 2172 if (!isUndefOrEqual(BitI, j)) 2173 return false; 2174 if (V2IsSplat) { 2175 if (isUndefOrEqual(BitI1, NumElts)) 2176 return false; 2177 } else { 2178 if (!isUndefOrEqual(BitI1, j + NumElts)) 2179 return false; 2180 } 2181 } 2182 2183 return true; 2184} 2185 2186bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2187 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2188 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2189} 2190 2191/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2192/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2193bool static isUNPCKHMask(SDOperandPtr Elts, unsigned NumElts, 2194 bool V2IsSplat = false) { 2195 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2196 return false; 2197 2198 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2199 SDOperand BitI = Elts[i]; 2200 SDOperand BitI1 = Elts[i+1]; 2201 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2202 return false; 2203 if (V2IsSplat) { 2204 if (isUndefOrEqual(BitI1, NumElts)) 2205 return false; 2206 } else { 2207 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2208 return false; 2209 } 2210 } 2211 2212 return true; 2213} 2214 2215bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2216 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2217 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2218} 2219 2220/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2221/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2222/// <0, 0, 1, 1> 2223bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2224 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2225 2226 unsigned NumElems = N->getNumOperands(); 2227 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2228 return false; 2229 2230 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2231 SDOperand BitI = N->getOperand(i); 2232 SDOperand BitI1 = N->getOperand(i+1); 2233 2234 if (!isUndefOrEqual(BitI, j)) 2235 return false; 2236 if (!isUndefOrEqual(BitI1, j)) 2237 return false; 2238 } 2239 2240 return true; 2241} 2242 2243/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2244/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2245/// <2, 2, 3, 3> 2246bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2247 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2248 2249 unsigned NumElems = N->getNumOperands(); 2250 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2251 return false; 2252 2253 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2254 SDOperand BitI = N->getOperand(i); 2255 SDOperand BitI1 = N->getOperand(i + 1); 2256 2257 if (!isUndefOrEqual(BitI, j)) 2258 return false; 2259 if (!isUndefOrEqual(BitI1, j)) 2260 return false; 2261 } 2262 2263 return true; 2264} 2265 2266/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2267/// specifies a shuffle of elements that is suitable for input to MOVSS, 2268/// MOVSD, and MOVD, i.e. setting the lowest element. 
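/// For example, <4,1,2,3> for 4 elements or <2,1> for 2 elements: element 0
/// is taken from V2 and the remaining elements come from V1 in order.
/// (Illustrative values only.)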
2269static bool isMOVLMask(SDOperandPtr Elts, unsigned NumElts) { 2270 if (NumElts != 2 && NumElts != 4) 2271 return false; 2272 2273 if (!isUndefOrEqual(Elts[0], NumElts)) 2274 return false; 2275 2276 for (unsigned i = 1; i < NumElts; ++i) { 2277 if (!isUndefOrEqual(Elts[i], i)) 2278 return false; 2279 } 2280 2281 return true; 2282} 2283 2284bool X86::isMOVLMask(SDNode *N) { 2285 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2286 return ::isMOVLMask(N->op_begin(), N->getNumOperands()); 2287} 2288 2289/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 2290/// of what x86 movss want. X86 movs requires the lowest element to be lowest 2291/// element of vector 2 and the other elements to come from vector 1 in order. 2292static bool isCommutedMOVL(SDOperandPtr Ops, unsigned NumOps, 2293 bool V2IsSplat = false, 2294 bool V2IsUndef = false) { 2295 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 2296 return false; 2297 2298 if (!isUndefOrEqual(Ops[0], 0)) 2299 return false; 2300 2301 for (unsigned i = 1; i < NumOps; ++i) { 2302 SDOperand Arg = Ops[i]; 2303 if (!(isUndefOrEqual(Arg, i+NumOps) || 2304 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || 2305 (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) 2306 return false; 2307 } 2308 2309 return true; 2310} 2311 2312static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 2313 bool V2IsUndef = false) { 2314 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2315 return isCommutedMOVL(N->op_begin(), N->getNumOperands(), 2316 V2IsSplat, V2IsUndef); 2317} 2318 2319/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2320/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 2321bool X86::isMOVSHDUPMask(SDNode *N) { 2322 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2323 2324 if (N->getNumOperands() != 4) 2325 return false; 2326 2327 // Expect 1, 1, 3, 3 2328 for (unsigned i = 0; i < 2; ++i) { 2329 SDOperand Arg = N->getOperand(i); 2330 if (Arg.getOpcode() == ISD::UNDEF) continue; 2331 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2332 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2333 if (Val != 1) return false; 2334 } 2335 2336 bool HasHi = false; 2337 for (unsigned i = 2; i < 4; ++i) { 2338 SDOperand Arg = N->getOperand(i); 2339 if (Arg.getOpcode() == ISD::UNDEF) continue; 2340 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2341 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2342 if (Val != 3) return false; 2343 HasHi = true; 2344 } 2345 2346 // Don't use movshdup if it can be done with a shufps. 2347 return HasHi; 2348} 2349 2350/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2351/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 
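/// For example, <0,0,2,2>: each even-indexed source element is duplicated
/// into the adjacent odd lane. Undef entries are accepted, but at least one
/// of the two high elements must be an explicit 2 (the HasHi check below).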
2352bool X86::isMOVSLDUPMask(SDNode *N) { 2353 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2354 2355 if (N->getNumOperands() != 4) 2356 return false; 2357 2358 // Expect 0, 0, 2, 2 2359 for (unsigned i = 0; i < 2; ++i) { 2360 SDOperand Arg = N->getOperand(i); 2361 if (Arg.getOpcode() == ISD::UNDEF) continue; 2362 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2363 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2364 if (Val != 0) return false; 2365 } 2366 2367 bool HasHi = false; 2368 for (unsigned i = 2; i < 4; ++i) { 2369 SDOperand Arg = N->getOperand(i); 2370 if (Arg.getOpcode() == ISD::UNDEF) continue; 2371 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2372 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2373 if (Val != 2) return false; 2374 HasHi = true; 2375 } 2376 2377 // Don't use movshdup if it can be done with a shufps. 2378 return HasHi; 2379} 2380 2381/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand 2382/// specifies a identity operation on the LHS or RHS. 2383static bool isIdentityMask(SDNode *N, bool RHS = false) { 2384 unsigned NumElems = N->getNumOperands(); 2385 for (unsigned i = 0; i < NumElems; ++i) 2386 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0))) 2387 return false; 2388 return true; 2389} 2390 2391/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2392/// a splat of a single element. 2393static bool isSplatMask(SDNode *N) { 2394 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2395 2396 // This is a splat operation if each element of the permute is the same, and 2397 // if the value doesn't reference the second vector. 2398 unsigned NumElems = N->getNumOperands(); 2399 SDOperand ElementBase; 2400 unsigned i = 0; 2401 for (; i != NumElems; ++i) { 2402 SDOperand Elt = N->getOperand(i); 2403 if (isa<ConstantSDNode>(Elt)) { 2404 ElementBase = Elt; 2405 break; 2406 } 2407 } 2408 2409 if (!ElementBase.Val) 2410 return false; 2411 2412 for (; i != NumElems; ++i) { 2413 SDOperand Arg = N->getOperand(i); 2414 if (Arg.getOpcode() == ISD::UNDEF) continue; 2415 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2416 if (Arg != ElementBase) return false; 2417 } 2418 2419 // Make sure it is a splat of the first vector operand. 2420 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 2421} 2422 2423/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2424/// a splat of a single element and it's a 2 or 4 element mask. 2425bool X86::isSplatMask(SDNode *N) { 2426 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2427 2428 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 2429 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 2430 return false; 2431 return ::isSplatMask(N); 2432} 2433 2434/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 2435/// specifies a splat of zero element. 2436bool X86::isSplatLoMask(SDNode *N) { 2437 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2438 2439 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2440 if (!isUndefOrEqual(N->getOperand(i), 0)) 2441 return false; 2442 return true; 2443} 2444 2445/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2446/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2447/// instructions. 2448unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2449 unsigned NumOperands = N->getNumOperands(); 2450 unsigned Shift = (NumOperands == 4) ? 
2 : 1; 2451 unsigned Mask = 0; 2452 for (unsigned i = 0; i < NumOperands; ++i) { 2453 unsigned Val = 0; 2454 SDOperand Arg = N->getOperand(NumOperands-i-1); 2455 if (Arg.getOpcode() != ISD::UNDEF) 2456 Val = cast<ConstantSDNode>(Arg)->getValue(); 2457 if (Val >= NumOperands) Val -= NumOperands; 2458 Mask |= Val; 2459 if (i != NumOperands - 1) 2460 Mask <<= Shift; 2461 } 2462 2463 return Mask; 2464} 2465 2466/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2467/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2468/// instructions. 2469unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2470 unsigned Mask = 0; 2471 // 8 nodes, but we only care about the last 4. 2472 for (unsigned i = 7; i >= 4; --i) { 2473 unsigned Val = 0; 2474 SDOperand Arg = N->getOperand(i); 2475 if (Arg.getOpcode() != ISD::UNDEF) 2476 Val = cast<ConstantSDNode>(Arg)->getValue(); 2477 Mask |= (Val - 4); 2478 if (i != 4) 2479 Mask <<= 2; 2480 } 2481 2482 return Mask; 2483} 2484 2485/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2486/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2487/// instructions. 2488unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2489 unsigned Mask = 0; 2490 // 8 nodes, but we only care about the first 4. 2491 for (int i = 3; i >= 0; --i) { 2492 unsigned Val = 0; 2493 SDOperand Arg = N->getOperand(i); 2494 if (Arg.getOpcode() != ISD::UNDEF) 2495 Val = cast<ConstantSDNode>(Arg)->getValue(); 2496 Mask |= Val; 2497 if (i != 0) 2498 Mask <<= 2; 2499 } 2500 2501 return Mask; 2502} 2503 2504/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2505/// specifies a 8 element shuffle that can be broken into a pair of 2506/// PSHUFHW and PSHUFLW. 2507static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2508 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2509 2510 if (N->getNumOperands() != 8) 2511 return false; 2512 2513 // Lower quadword shuffled. 2514 for (unsigned i = 0; i != 4; ++i) { 2515 SDOperand Arg = N->getOperand(i); 2516 if (Arg.getOpcode() == ISD::UNDEF) continue; 2517 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2518 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2519 if (Val >= 4) 2520 return false; 2521 } 2522 2523 // Upper quadword shuffled. 2524 for (unsigned i = 4; i != 8; ++i) { 2525 SDOperand Arg = N->getOperand(i); 2526 if (Arg.getOpcode() == ISD::UNDEF) continue; 2527 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2528 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2529 if (Val < 4 || Val > 7) 2530 return false; 2531 } 2532 2533 return true; 2534} 2535 2536/// CommuteVectorShuffle - Swap vector_shuffle operands as well as 2537/// values in ther permute mask. 
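/// For example, commuting vector_shuffle V1, V2, <0,5,2,7> yields
/// vector_shuffle V2, V1, <4,1,6,3>; undef mask elements stay undef.
/// (Illustrative values only.)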
2538static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2539 SDOperand &V2, SDOperand &Mask, 2540 SelectionDAG &DAG) { 2541 MVT::ValueType VT = Op.getValueType(); 2542 MVT::ValueType MaskVT = Mask.getValueType(); 2543 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2544 unsigned NumElems = Mask.getNumOperands(); 2545 SmallVector<SDOperand, 8> MaskVec; 2546 2547 for (unsigned i = 0; i != NumElems; ++i) { 2548 SDOperand Arg = Mask.getOperand(i); 2549 if (Arg.getOpcode() == ISD::UNDEF) { 2550 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2551 continue; 2552 } 2553 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2554 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2555 if (Val < NumElems) 2556 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2557 else 2558 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2559 } 2560 2561 std::swap(V1, V2); 2562 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2563 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2564} 2565 2566/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2567/// the two vector operands have swapped position. 2568static 2569SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2570 MVT::ValueType MaskVT = Mask.getValueType(); 2571 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2572 unsigned NumElems = Mask.getNumOperands(); 2573 SmallVector<SDOperand, 8> MaskVec; 2574 for (unsigned i = 0; i != NumElems; ++i) { 2575 SDOperand Arg = Mask.getOperand(i); 2576 if (Arg.getOpcode() == ISD::UNDEF) { 2577 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2578 continue; 2579 } 2580 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2581 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2582 if (Val < NumElems) 2583 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2584 else 2585 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2586 } 2587 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2588} 2589 2590 2591/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2592/// match movhlps. The lower half elements should come from upper half of 2593/// V1 (and in order), and the upper half elements should come from the upper 2594/// half of V2 (and in order). 2595static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2596 unsigned NumElems = Mask->getNumOperands(); 2597 if (NumElems != 4) 2598 return false; 2599 for (unsigned i = 0, e = 2; i != e; ++i) 2600 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2601 return false; 2602 for (unsigned i = 2; i != 4; ++i) 2603 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2604 return false; 2605 return true; 2606} 2607 2608/// isScalarLoadToVector - Returns true if the node is a scalar load that 2609/// is promoted to a vector. It also returns the LoadSDNode by reference if 2610/// required. 2611static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 2612 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2613 N = N->getOperand(0).Val; 2614 if (ISD::isNON_EXTLoad(N)) { 2615 if (LD) 2616 *LD = cast<LoadSDNode>(N); 2617 return true; 2618 } 2619 } 2620 return false; 2621} 2622 2623/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2624/// match movlp{s|d}. The lower half elements should come from lower half of 2625/// V1 (and in order), and the upper half elements should come from the upper 2626/// half of V2 (and in order). 
And since V1 will become the source of the
2627  /// MOVLP, it must be either a vector load or a scalar load to vector.
2628  static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2629    if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2630      return false;
2631    // If V2 is a vector load, don't do this transformation. We will try to use
2632    // a load-folding shufps op instead.
2633    if (ISD::isNON_EXTLoad(V2))
2634      return false;
2635
2636    unsigned NumElems = Mask->getNumOperands();
2637    if (NumElems != 2 && NumElems != 4)
2638      return false;
2639    for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2640      if (!isUndefOrEqual(Mask->getOperand(i), i))
2641        return false;
2642    for (unsigned i = NumElems/2; i != NumElems; ++i)
2643      if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2644        return false;
2645    return true;
2646  }
2647
2648  /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2649  /// all the same.
2650  static bool isSplatVector(SDNode *N) {
2651    if (N->getOpcode() != ISD::BUILD_VECTOR)
2652      return false;
2653
2654    SDOperand SplatValue = N->getOperand(0);
2655    for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2656      if (N->getOperand(i) != SplatValue)
2657        return false;
2658    return true;
2659  }
2660
2661  /// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2662  /// to an undef.
2663  static bool isUndefShuffle(SDNode *N) {
2664    if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2665      return false;
2666
2667    SDOperand V1 = N->getOperand(0);
2668    SDOperand V2 = N->getOperand(1);
2669    SDOperand Mask = N->getOperand(2);
2670    unsigned NumElems = Mask.getNumOperands();
2671    for (unsigned i = 0; i != NumElems; ++i) {
2672      SDOperand Arg = Mask.getOperand(i);
2673      if (Arg.getOpcode() != ISD::UNDEF) {
2674        unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2675        if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2676          return false;
2677        else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2678          return false;
2679      }
2680    }
2681    return true;
2682  }
2683
2684  /// isZeroNode - Returns true if Elt is a constant zero or a floating point
2685  /// constant +0.0.
2686  static inline bool isZeroNode(SDOperand Elt) {
2687    return ((isa<ConstantSDNode>(Elt) &&
2688             cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2689            (isa<ConstantFPSDNode>(Elt) &&
2690             cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
2691  }
2692
2693  /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2694  /// to a zero vector.
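  /// For example, vector_shuffle (build_vector 0,0,0,0), X, <0,1,2,3> resolves
  /// to zero, since every selected element is a known zero regardless of X.
  /// (Illustrative only.)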
2695static bool isZeroShuffle(SDNode *N) { 2696 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2697 return false; 2698 2699 SDOperand V1 = N->getOperand(0); 2700 SDOperand V2 = N->getOperand(1); 2701 SDOperand Mask = N->getOperand(2); 2702 unsigned NumElems = Mask.getNumOperands(); 2703 for (unsigned i = 0; i != NumElems; ++i) { 2704 SDOperand Arg = Mask.getOperand(i); 2705 if (Arg.getOpcode() == ISD::UNDEF) 2706 continue; 2707 2708 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2709 if (Idx < NumElems) { 2710 unsigned Opc = V1.Val->getOpcode(); 2711 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val)) 2712 continue; 2713 if (Opc != ISD::BUILD_VECTOR || 2714 !isZeroNode(V1.Val->getOperand(Idx))) 2715 return false; 2716 } else if (Idx >= NumElems) { 2717 unsigned Opc = V2.Val->getOpcode(); 2718 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val)) 2719 continue; 2720 if (Opc != ISD::BUILD_VECTOR || 2721 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2722 return false; 2723 } 2724 } 2725 return true; 2726} 2727 2728/// getZeroVector - Returns a vector of specified type with all zero elements. 2729/// 2730static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2731 assert(MVT::isVector(VT) && "Expected a vector type"); 2732 2733 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2734 // type. This ensures they get CSE'd. 2735 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2736 SDOperand Vec; 2737 if (MVT::getSizeInBits(VT) == 64) // MMX 2738 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2739 else // SSE 2740 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2741 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2742} 2743 2744/// getOnesVector - Returns a vector of specified type with all bits set. 2745/// 2746static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2747 assert(MVT::isVector(VT) && "Expected a vector type"); 2748 2749 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2750 // type. This ensures they get CSE'd. 2751 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2752 SDOperand Vec; 2753 if (MVT::getSizeInBits(VT) == 64) // MMX 2754 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2755 else // SSE 2756 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2757 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2758} 2759 2760 2761/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2762/// that point to V2 points to its first element. 2763static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2764 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2765 2766 bool Changed = false; 2767 SmallVector<SDOperand, 8> MaskVec; 2768 unsigned NumElems = Mask.getNumOperands(); 2769 for (unsigned i = 0; i != NumElems; ++i) { 2770 SDOperand Arg = Mask.getOperand(i); 2771 if (Arg.getOpcode() != ISD::UNDEF) { 2772 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2773 if (Val > NumElems) { 2774 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2775 Changed = true; 2776 } 2777 } 2778 MaskVec.push_back(Arg); 2779 } 2780 2781 if (Changed) 2782 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2783 &MaskVec[0], MaskVec.size()); 2784 return Mask; 2785} 2786 2787/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2788/// operation of specified width. 
2789static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2790 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2791 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2792 2793 SmallVector<SDOperand, 8> MaskVec; 2794 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2795 for (unsigned i = 1; i != NumElems; ++i) 2796 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2797 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2798} 2799 2800/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2801/// of specified width. 2802static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2803 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2804 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2805 SmallVector<SDOperand, 8> MaskVec; 2806 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2807 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2808 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2809 } 2810 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2811} 2812 2813/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2814/// of specified width. 2815static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2816 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2817 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2818 unsigned Half = NumElems/2; 2819 SmallVector<SDOperand, 8> MaskVec; 2820 for (unsigned i = 0; i != Half; ++i) { 2821 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2822 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2823 } 2824 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2825} 2826 2827/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps 2828/// element #0 of a vector with the specified index, leaving the rest of the 2829/// elements in place. 2830static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, 2831 SelectionDAG &DAG) { 2832 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2833 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2834 SmallVector<SDOperand, 8> MaskVec; 2835 // Element #0 of the result gets the elt we are replacing. 2836 MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); 2837 for (unsigned i = 1; i != NumElems; ++i) 2838 MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); 2839 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2840} 2841 2842/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. 2843static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { 2844 MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32; 2845 MVT::ValueType VT = Op.getValueType(); 2846 if (PVT == VT) 2847 return Op; 2848 SDOperand V1 = Op.getOperand(0); 2849 SDOperand Mask = Op.getOperand(2); 2850 unsigned NumElems = Mask.getNumOperands(); 2851 // Special handling of v4f32 -> v4i32. 
2852 if (VT != MVT::v4f32) { 2853 Mask = getUnpacklMask(NumElems, DAG); 2854 while (NumElems > 4) { 2855 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2856 NumElems >>= 1; 2857 } 2858 Mask = getZeroVector(MVT::v4i32, DAG); 2859 } 2860 2861 V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1); 2862 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, 2863 DAG.getNode(ISD::UNDEF, PVT), Mask); 2864 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2865} 2866 2867/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2868/// vector of zero or undef vector. This produces a shuffle where the low 2869/// element of V2 is swizzled into the zero/undef vector, landing at element 2870/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 2871static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, 2872 bool isZero, SelectionDAG &DAG) { 2873 MVT::ValueType VT = V2.getValueType(); 2874 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2875 unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); 2876 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2877 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2878 SmallVector<SDOperand, 16> MaskVec; 2879 for (unsigned i = 0; i != NumElems; ++i) 2880 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 2881 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2882 else 2883 MaskVec.push_back(DAG.getConstant(i, EVT)); 2884 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2885 &MaskVec[0], MaskVec.size()); 2886 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2887} 2888 2889/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2890/// 2891static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2892 unsigned NumNonZero, unsigned NumZero, 2893 SelectionDAG &DAG, TargetLowering &TLI) { 2894 if (NumNonZero > 8) 2895 return SDOperand(); 2896 2897 SDOperand V(0, 0); 2898 bool First = true; 2899 for (unsigned i = 0; i < 16; ++i) { 2900 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2901 if (ThisIsNonZero && First) { 2902 if (NumZero) 2903 V = getZeroVector(MVT::v8i16, DAG); 2904 else 2905 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2906 First = false; 2907 } 2908 2909 if ((i & 1) != 0) { 2910 SDOperand ThisElt(0, 0), LastElt(0, 0); 2911 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2912 if (LastIsNonZero) { 2913 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2914 } 2915 if (ThisIsNonZero) { 2916 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2917 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2918 ThisElt, DAG.getConstant(8, MVT::i8)); 2919 if (LastIsNonZero) 2920 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2921 } else 2922 ThisElt = LastElt; 2923 2924 if (ThisElt.Val) 2925 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2926 DAG.getIntPtrConstant(i/2)); 2927 } 2928 } 2929 2930 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2931} 2932 2933/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 
2934/// 2935static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2936 unsigned NumNonZero, unsigned NumZero, 2937 SelectionDAG &DAG, TargetLowering &TLI) { 2938 if (NumNonZero > 4) 2939 return SDOperand(); 2940 2941 SDOperand V(0, 0); 2942 bool First = true; 2943 for (unsigned i = 0; i < 8; ++i) { 2944 bool isNonZero = (NonZeros & (1 << i)) != 0; 2945 if (isNonZero) { 2946 if (First) { 2947 if (NumZero) 2948 V = getZeroVector(MVT::v8i16, DAG); 2949 else 2950 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2951 First = false; 2952 } 2953 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2954 DAG.getIntPtrConstant(i)); 2955 } 2956 } 2957 2958 return V; 2959} 2960 2961SDOperand 2962X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2963 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 2964 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 2965 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 2966 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 2967 // eliminated on x86-32 hosts. 2968 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 2969 return Op; 2970 2971 if (ISD::isBuildVectorAllOnes(Op.Val)) 2972 return getOnesVector(Op.getValueType(), DAG); 2973 return getZeroVector(Op.getValueType(), DAG); 2974 } 2975 2976 MVT::ValueType VT = Op.getValueType(); 2977 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2978 unsigned EVTBits = MVT::getSizeInBits(EVT); 2979 2980 unsigned NumElems = Op.getNumOperands(); 2981 unsigned NumZero = 0; 2982 unsigned NumNonZero = 0; 2983 unsigned NonZeros = 0; 2984 bool IsAllConstants = true; 2985 SmallSet<SDOperand, 8> Values; 2986 for (unsigned i = 0; i < NumElems; ++i) { 2987 SDOperand Elt = Op.getOperand(i); 2988 if (Elt.getOpcode() == ISD::UNDEF) 2989 continue; 2990 Values.insert(Elt); 2991 if (Elt.getOpcode() != ISD::Constant && 2992 Elt.getOpcode() != ISD::ConstantFP) 2993 IsAllConstants = false; 2994 if (isZeroNode(Elt)) 2995 NumZero++; 2996 else { 2997 NonZeros |= (1 << i); 2998 NumNonZero++; 2999 } 3000 } 3001 3002 if (NumNonZero == 0) { 3003 // All undef vector. Return an UNDEF. All zero vectors were handled above. 3004 return DAG.getNode(ISD::UNDEF, VT); 3005 } 3006 3007 // Special case for single non-zero, non-undef, element. 3008 if (NumNonZero == 1 && NumElems <= 4) { 3009 unsigned Idx = CountTrailingZeros_32(NonZeros); 3010 SDOperand Item = Op.getOperand(Idx); 3011 3012 // If this is an insertion of an i64 value on x86-32, and if the top bits of 3013 // the value are obviously zero, truncate the value to i32 and do the 3014 // insertion that way. Only do this if the value is non-constant or if the 3015 // value is a constant being inserted into element 0. It is cheaper to do 3016 // a constant pool load than it is to do a movd + shuffle. 3017 if (EVT == MVT::i64 && !Subtarget->is64Bit() && 3018 (!IsAllConstants || Idx == 0)) { 3019 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 3020 // Handle MMX and SSE both. 3021 MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; 3022 MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2; 3023 3024 // Truncate the value (which may itself be a constant) to i32, and 3025 // convert it to a vector with movd (S2V+shuffle to zero extend). 
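        // For example, on x86-32 an i64 whose top half is known zero going
        // into lane 0 of a v2i64 becomes, roughly,
        //   (v2i64 (bit_convert (v4i32 (vector_shuffle zerovec,
        //                                (scalar_to_vector (i32 (trunc X))),
        //                                <4,1,2,3>))))
        // which the instruction selector can then match as a single movd.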
3026 Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); 3027 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); 3028 Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG); 3029 3030 // Now we have our 32-bit value zero extended in the low element of 3031 // a vector. If Idx != 0, swizzle it into place. 3032 if (Idx != 0) { 3033 SDOperand Ops[] = { 3034 Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), 3035 getSwapEltZeroMask(VecElts, Idx, DAG) 3036 }; 3037 Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); 3038 } 3039 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); 3040 } 3041 } 3042 3043 // If we have a constant or non-constant insertion into the low element of 3044 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 3045 // the rest of the elements. This will be matched as movd/movq/movss/movsd 3046 // depending on what the source datatype is. Because we can only get here 3047 // when NumElems <= 4, this only needs to handle i32/f32/i64/f64. 3048 if (Idx == 0 && 3049 // Don't do this for i64 values on x86-32. 3050 (EVT != MVT::i64 || Subtarget->is64Bit())) { 3051 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3052 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 3053 return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3054 } 3055 3056 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 3057 return SDOperand(); 3058 3059 // Otherwise, if this is a vector with i32 or f32 elements, and the element 3060 // is a non-constant being inserted into an element other than the low one, 3061 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 3062 // movd/movss) to move this into the low element, then shuffle it into 3063 // place. 3064 if (EVTBits == 32) { 3065 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3066 3067 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3068 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3069 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3070 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3071 SmallVector<SDOperand, 8> MaskVec; 3072 for (unsigned i = 0; i < NumElems; i++) 3073 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3074 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3075 &MaskVec[0], MaskVec.size()); 3076 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3077 DAG.getNode(ISD::UNDEF, VT), Mask); 3078 } 3079 } 3080 3081 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3082 if (Values.size() == 1) 3083 return SDOperand(); 3084 3085 // A vector full of immediates; various special cases are already 3086 // handled, so this is best done with a single constant-pool load. 3087 if (IsAllConstants) 3088 return SDOperand(); 3089 3090 // Let legalizer expand 2-wide build_vectors. 3091 if (EVTBits == 64) { 3092 if (NumNonZero == 1) { 3093 // One half is zero or undef. 3094 unsigned Idx = CountTrailingZeros_32(NonZeros); 3095 SDOperand V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, 3096 Op.getOperand(Idx)); 3097 return getShuffleVectorZeroOrUndef(V2, Idx, true, DAG); 3098 } 3099 return SDOperand(); 3100 } 3101 3102 // If element VT is < 32 bits, convert it to inserts into a zero vector. 
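  // The v16i8 path below has to pair adjacent bytes into i16 values (the even
  // byte OR'd with the odd byte shifted left by 8) and insert those with
  // pinsrw, since there is no byte insert before SSE4.1; the v8i16 path can
  // insert each non-zero element directly.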
3103 if (EVTBits == 8 && NumElems == 16) { 3104 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3105 *this); 3106 if (V.Val) return V; 3107 } 3108 3109 if (EVTBits == 16 && NumElems == 8) { 3110 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3111 *this); 3112 if (V.Val) return V; 3113 } 3114 3115 // If element VT is == 32 bits, turn it into a number of shuffles. 3116 SmallVector<SDOperand, 8> V; 3117 V.resize(NumElems); 3118 if (NumElems == 4 && NumZero > 0) { 3119 for (unsigned i = 0; i < 4; ++i) { 3120 bool isZero = !(NonZeros & (1 << i)); 3121 if (isZero) 3122 V[i] = getZeroVector(VT, DAG); 3123 else 3124 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3125 } 3126 3127 for (unsigned i = 0; i < 2; ++i) { 3128 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3129 default: break; 3130 case 0: 3131 V[i] = V[i*2]; // Must be a zero vector. 3132 break; 3133 case 1: 3134 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3135 getMOVLMask(NumElems, DAG)); 3136 break; 3137 case 2: 3138 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3139 getMOVLMask(NumElems, DAG)); 3140 break; 3141 case 3: 3142 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3143 getUnpacklMask(NumElems, DAG)); 3144 break; 3145 } 3146 } 3147 3148 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3149 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3150 SmallVector<SDOperand, 8> MaskVec; 3151 bool Reverse = (NonZeros & 0x3) == 2; 3152 for (unsigned i = 0; i < 2; ++i) 3153 if (Reverse) 3154 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3155 else 3156 MaskVec.push_back(DAG.getConstant(i, EVT)); 3157 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3158 for (unsigned i = 0; i < 2; ++i) 3159 if (Reverse) 3160 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3161 else 3162 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3163 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3164 &MaskVec[0], MaskVec.size()); 3165 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3166 } 3167 3168 if (Values.size() > 2) { 3169 // Expand into a number of unpckl*. 3170 // e.g. for v4f32 3171 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3172 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3173 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3174 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3175 for (unsigned i = 0; i < NumElems; ++i) 3176 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3177 NumElems >>= 1; 3178 while (NumElems != 0) { 3179 for (unsigned i = 0; i < NumElems; ++i) 3180 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3181 UnpckMask); 3182 NumElems >>= 1; 3183 } 3184 return V[0]; 3185 } 3186 3187 return SDOperand(); 3188} 3189 3190static 3191SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3192 SDOperand PermMask, SelectionDAG &DAG, 3193 TargetLowering &TLI) { 3194 SDOperand NewV; 3195 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3196 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3197 MVT::ValueType PtrVT = TLI.getPointerTy(); 3198 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3199 PermMask.Val->op_end()); 3200 3201 // First record which half of which vector the low elements come from. 
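  // Each mask index addresses the concatenation V1||V2, so EltIdx/4 picks one
  // of four quadrants: 0 and 1 are the low and high halves of V1, 2 and 3 are
  // the low and high halves of V2.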
3202 SmallVector<unsigned, 4> LowQuad(4); 3203 for (unsigned i = 0; i < 4; ++i) { 3204 SDOperand Elt = MaskElts[i]; 3205 if (Elt.getOpcode() == ISD::UNDEF) 3206 continue; 3207 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3208 int QuadIdx = EltIdx / 4; 3209 ++LowQuad[QuadIdx]; 3210 } 3211 int BestLowQuad = -1; 3212 unsigned MaxQuad = 1; 3213 for (unsigned i = 0; i < 4; ++i) { 3214 if (LowQuad[i] > MaxQuad) { 3215 BestLowQuad = i; 3216 MaxQuad = LowQuad[i]; 3217 } 3218 } 3219 3220 // Record which half of which vector the high elements come from. 3221 SmallVector<unsigned, 4> HighQuad(4); 3222 for (unsigned i = 4; i < 8; ++i) { 3223 SDOperand Elt = MaskElts[i]; 3224 if (Elt.getOpcode() == ISD::UNDEF) 3225 continue; 3226 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3227 int QuadIdx = EltIdx / 4; 3228 ++HighQuad[QuadIdx]; 3229 } 3230 int BestHighQuad = -1; 3231 MaxQuad = 1; 3232 for (unsigned i = 0; i < 4; ++i) { 3233 if (HighQuad[i] > MaxQuad) { 3234 BestHighQuad = i; 3235 MaxQuad = HighQuad[i]; 3236 } 3237 } 3238 3239 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3240 if (BestLowQuad != -1 || BestHighQuad != -1) { 3241 // First sort the 4 chunks in order using shufpd. 3242 SmallVector<SDOperand, 8> MaskVec; 3243 if (BestLowQuad != -1) 3244 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3245 else 3246 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3247 if (BestHighQuad != -1) 3248 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3249 else 3250 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3251 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3252 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3253 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3254 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3255 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3256 3257 // Now sort high and low parts separately. 3258 BitVector InOrder(8); 3259 if (BestLowQuad != -1) { 3260 // Sort lower half in order using PSHUFLW. 3261 MaskVec.clear(); 3262 bool AnyOutOrder = false; 3263 for (unsigned i = 0; i != 4; ++i) { 3264 SDOperand Elt = MaskElts[i]; 3265 if (Elt.getOpcode() == ISD::UNDEF) { 3266 MaskVec.push_back(Elt); 3267 InOrder.set(i); 3268 } else { 3269 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3270 if (EltIdx != i) 3271 AnyOutOrder = true; 3272 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3273 // If this element is in the right place after this shuffle, then 3274 // remember it. 3275 if ((int)(EltIdx / 4) == BestLowQuad) 3276 InOrder.set(i); 3277 } 3278 } 3279 if (AnyOutOrder) { 3280 for (unsigned i = 4; i != 8; ++i) 3281 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3282 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3283 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3284 } 3285 } 3286 3287 if (BestHighQuad != -1) { 3288 // Sort high half in order using PSHUFHW if possible. 
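      // PSHUFHW leaves the low quadword untouched and only permutes elements
      // 4-7, which is why the identity entries 0-3 are pushed first here.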
3289 MaskVec.clear(); 3290 for (unsigned i = 0; i != 4; ++i) 3291 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3292 bool AnyOutOrder = false; 3293 for (unsigned i = 4; i != 8; ++i) { 3294 SDOperand Elt = MaskElts[i]; 3295 if (Elt.getOpcode() == ISD::UNDEF) { 3296 MaskVec.push_back(Elt); 3297 InOrder.set(i); 3298 } else { 3299 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3300 if (EltIdx != i) 3301 AnyOutOrder = true; 3302 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3303 // If this element is in the right place after this shuffle, then 3304 // remember it. 3305 if ((int)(EltIdx / 4) == BestHighQuad) 3306 InOrder.set(i); 3307 } 3308 } 3309 if (AnyOutOrder) { 3310 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3311 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3312 } 3313 } 3314 3315 // The other elements are put in the right place using pextrw and pinsrw. 3316 for (unsigned i = 0; i != 8; ++i) { 3317 if (InOrder[i]) 3318 continue; 3319 SDOperand Elt = MaskElts[i]; 3320 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3321 if (EltIdx == i) 3322 continue; 3323 SDOperand ExtOp = (EltIdx < 8) 3324 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3325 DAG.getConstant(EltIdx, PtrVT)) 3326 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3327 DAG.getConstant(EltIdx - 8, PtrVT)); 3328 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3329 DAG.getConstant(i, PtrVT)); 3330 } 3331 return NewV; 3332 } 3333 3334 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3335 ///as few as possible. 3336 // First, let's find out how many elements are already in the right order. 3337 unsigned V1InOrder = 0; 3338 unsigned V1FromV1 = 0; 3339 unsigned V2InOrder = 0; 3340 unsigned V2FromV2 = 0; 3341 SmallVector<SDOperand, 8> V1Elts; 3342 SmallVector<SDOperand, 8> V2Elts; 3343 for (unsigned i = 0; i < 8; ++i) { 3344 SDOperand Elt = MaskElts[i]; 3345 if (Elt.getOpcode() == ISD::UNDEF) { 3346 V1Elts.push_back(Elt); 3347 V2Elts.push_back(Elt); 3348 ++V1InOrder; 3349 ++V2InOrder; 3350 continue; 3351 } 3352 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3353 if (EltIdx == i) { 3354 V1Elts.push_back(Elt); 3355 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3356 ++V1InOrder; 3357 } else if (EltIdx == i+8) { 3358 V1Elts.push_back(Elt); 3359 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3360 ++V2InOrder; 3361 } else if (EltIdx < 8) { 3362 V1Elts.push_back(Elt); 3363 ++V1FromV1; 3364 } else { 3365 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3366 ++V2FromV2; 3367 } 3368 } 3369 3370 if (V2InOrder > V1InOrder) { 3371 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3372 std::swap(V1, V2); 3373 std::swap(V1Elts, V2Elts); 3374 std::swap(V1FromV1, V2FromV2); 3375 } 3376 3377 if ((V1FromV1 + V1InOrder) != 8) { 3378 // Some elements are from V2. 
3379 if (V1FromV1) { 3380 // If there are elements that are from V1 but out of place, 3381 // then first sort them in place 3382 SmallVector<SDOperand, 8> MaskVec; 3383 for (unsigned i = 0; i < 8; ++i) { 3384 SDOperand Elt = V1Elts[i]; 3385 if (Elt.getOpcode() == ISD::UNDEF) { 3386 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3387 continue; 3388 } 3389 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3390 if (EltIdx >= 8) 3391 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3392 else 3393 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3394 } 3395 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3396 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3397 } 3398 3399 NewV = V1; 3400 for (unsigned i = 0; i < 8; ++i) { 3401 SDOperand Elt = V1Elts[i]; 3402 if (Elt.getOpcode() == ISD::UNDEF) 3403 continue; 3404 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3405 if (EltIdx < 8) 3406 continue; 3407 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3408 DAG.getConstant(EltIdx - 8, PtrVT)); 3409 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3410 DAG.getConstant(i, PtrVT)); 3411 } 3412 return NewV; 3413 } else { 3414 // All elements are from V1. 3415 NewV = V1; 3416 for (unsigned i = 0; i < 8; ++i) { 3417 SDOperand Elt = V1Elts[i]; 3418 if (Elt.getOpcode() == ISD::UNDEF) 3419 continue; 3420 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3421 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3422 DAG.getConstant(EltIdx, PtrVT)); 3423 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3424 DAG.getConstant(i, PtrVT)); 3425 } 3426 return NewV; 3427 } 3428} 3429 3430/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3431/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be 3432/// done when every pair / quad of shuffle mask elements point to elements in 3433/// the right sequence. e.g. 3434/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3435static 3436SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3437 MVT::ValueType VT, 3438 SDOperand PermMask, SelectionDAG &DAG, 3439 TargetLowering &TLI) { 3440 unsigned NumElems = PermMask.getNumOperands(); 3441 unsigned NewWidth = (NumElems == 4) ? 
2 : 4; 3442 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3443 MVT::ValueType NewVT = MaskVT; 3444 switch (VT) { 3445 case MVT::v4f32: NewVT = MVT::v2f64; break; 3446 case MVT::v4i32: NewVT = MVT::v2i64; break; 3447 case MVT::v8i16: NewVT = MVT::v4i32; break; 3448 case MVT::v16i8: NewVT = MVT::v4i32; break; 3449 default: assert(false && "Unexpected!"); 3450 } 3451 3452 if (NewWidth == 2) { 3453 if (MVT::isInteger(VT)) 3454 NewVT = MVT::v2i64; 3455 else 3456 NewVT = MVT::v2f64; 3457 } 3458 unsigned Scale = NumElems / NewWidth; 3459 SmallVector<SDOperand, 8> MaskVec; 3460 for (unsigned i = 0; i < NumElems; i += Scale) { 3461 unsigned StartIdx = ~0U; 3462 for (unsigned j = 0; j < Scale; ++j) { 3463 SDOperand Elt = PermMask.getOperand(i+j); 3464 if (Elt.getOpcode() == ISD::UNDEF) 3465 continue; 3466 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3467 if (StartIdx == ~0U) 3468 StartIdx = EltIdx - (EltIdx % Scale); 3469 if (EltIdx != StartIdx + j) 3470 return SDOperand(); 3471 } 3472 if (StartIdx == ~0U) 3473 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3474 else 3475 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3476 } 3477 3478 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3479 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3480 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3481 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3482 &MaskVec[0], MaskVec.size())); 3483} 3484 3485/// getVZextMovL - Return a zero-extending vector move low node. 3486/// 3487static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT, 3488 SDOperand SrcOp, SelectionDAG &DAG, 3489 const X86Subtarget *Subtarget) { 3490 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 3491 LoadSDNode *LD = NULL; 3492 if (!isScalarLoadToVector(SrcOp.Val, &LD)) 3493 LD = dyn_cast<LoadSDNode>(SrcOp); 3494 if (!LD) { 3495 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 3496 // instead. 3497 MVT::ValueType EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 3498 if ((EVT != MVT::i64 || Subtarget->is64Bit()) && 3499 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 3500 SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT && 3501 SrcOp.getOperand(0).getOperand(0).getValueType() == EVT) { 3502 // PR2108 3503 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 3504 return DAG.getNode(ISD::BIT_CONVERT, VT, 3505 DAG.getNode(X86ISD::VZEXT_MOVL, OpVT, 3506 DAG.getNode(ISD::SCALAR_TO_VECTOR, OpVT, 3507 SrcOp.getOperand(0).getOperand(0)))); 3508 } 3509 } 3510 } 3511 3512 return DAG.getNode(ISD::BIT_CONVERT, VT, 3513 DAG.getNode(X86ISD::VZEXT_MOVL, OpVT, 3514 DAG.getNode(ISD::BIT_CONVERT, OpVT, SrcOp))); 3515} 3516 3517SDOperand 3518X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3519 SDOperand V1 = Op.getOperand(0); 3520 SDOperand V2 = Op.getOperand(1); 3521 SDOperand PermMask = Op.getOperand(2); 3522 MVT::ValueType VT = Op.getValueType(); 3523 unsigned NumElems = PermMask.getNumOperands(); 3524 bool isMMX = MVT::getSizeInBits(VT) == 64; 3525 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3526 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3527 bool V1IsSplat = false; 3528 bool V2IsSplat = false; 3529 3530 if (isUndefShuffle(Op.Val)) 3531 return DAG.getNode(ISD::UNDEF, VT); 3532 3533 if (isZeroShuffle(Op.Val)) 3534 return getZeroVector(VT, DAG); 3535 3536 if (isIdentityMask(PermMask.Val)) 3537 return V1; 3538 else if (isIdentityMask(PermMask.Val, true)) 3539 return V2; 3540 3541 if (isSplatMask(PermMask.Val)) { 3542 if (isMMX || NumElems < 4) return Op; 3543 // Promote it to a v4{if}32 splat. 3544 return PromoteSplat(Op, DAG, Subtarget->hasSSE2()); 3545 } 3546 3547 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3548 // do it! 3549 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3550 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3551 if (NewOp.Val) 3552 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3553 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3554 // FIXME: Figure out a cleaner way to do this. 3555 // Try to make use of movq to zero out the top part. 3556 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3557 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, 3558 DAG, *this); 3559 if (NewOp.Val) { 3560 SDOperand NewV1 = NewOp.getOperand(0); 3561 SDOperand NewV2 = NewOp.getOperand(1); 3562 SDOperand NewMask = NewOp.getOperand(2); 3563 if (isCommutedMOVL(NewMask.Val, true, false)) { 3564 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3565 return getVZextMovL(VT, NewOp.getValueType(), NewV2, DAG, Subtarget); 3566 } 3567 } 3568 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3569 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, 3570 DAG, *this); 3571 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3572 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1), 3573 DAG, Subtarget); 3574 } 3575 } 3576 3577 if (X86::isMOVLMask(PermMask.Val)) { 3578 if (V1IsUndef) 3579 return V2; 3580 if (ISD::isBuildVectorAllZeros(V1.Val)) 3581 return getVZextMovL(VT, VT, V2, DAG, Subtarget); 3582 return Op; 3583 } 3584 3585 if (X86::isMOVSHDUPMask(PermMask.Val) || 3586 X86::isMOVSLDUPMask(PermMask.Val) || 3587 X86::isMOVHLPSMask(PermMask.Val) || 3588 X86::isMOVHPMask(PermMask.Val) || 3589 X86::isMOVLPMask(PermMask.Val)) 3590 return Op; 3591 3592 if (ShouldXformToMOVHLPS(PermMask.Val) || 3593 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3594 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3595 3596 bool Commuted = false; 3597 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3598 // 1,1,1,1 -> v8i16 though. 
3599   V1IsSplat = isSplatVector(V1.Val);
3600   V2IsSplat = isSplatVector(V2.Val);
3601
3602   // Canonicalize the splat or undef, if present, to be on the RHS.
3603   if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
3604     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3605     std::swap(V1IsSplat, V2IsSplat);
3606     std::swap(V1IsUndef, V2IsUndef);
3607     Commuted = true;
3608   }
3609
3610   // FIXME: Figure out a cleaner way to do this.
3611   if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
3612     if (V2IsUndef) return V1;
3613     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3614     if (V2IsSplat) {
3615       // V2 is a splat, so the mask may be malformed. That is, it may point
3616       // to any V2 element. The instruction selector won't like this. Get
3617       // a corrected mask and commute to form a proper MOVS{S|D}.
3618       SDOperand NewMask = getMOVLMask(NumElems, DAG);
3619       if (NewMask.Val != PermMask.Val)
3620         Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3621     }
3622     return Op;
3623   }
3624
3625   if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3626       X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3627       X86::isUNPCKLMask(PermMask.Val) ||
3628       X86::isUNPCKHMask(PermMask.Val))
3629     return Op;
3630
3631   if (V2IsSplat) {
3632     // Normalize the mask so that all entries that point to V2 point to its
3633     // first element, then try to match unpck{h|l} again. If it matches,
3634     // return a new vector_shuffle with the corrected mask.
3635     SDOperand NewMask = NormalizeMask(PermMask, DAG);
3636     if (NewMask.Val != PermMask.Val) {
3637       if (X86::isUNPCKLMask(PermMask.Val, true)) {
3638         SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3639         return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3640       } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3641         SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3642         return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3643       }
3644     }
3645   }
3646
3647   // Normalize the node to match x86 shuffle ops if needed.
3648   if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
3649     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3650
3651   if (Commuted) {
3652     // Commute it back and try unpck* again.
3653     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3654     if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3655         X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3656         X86::isUNPCKLMask(PermMask.Val) ||
3657         X86::isUNPCKHMask(PermMask.Val))
3658       return Op;
3659   }
3660
3661   // Try PSHUF* first, then SHUFP*.
3662   // MMX doesn't have PSHUFD but it does have PSHUFW. While it's theoretically
3663   // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
3664   if (isMMX && NumElems == 4 && X86::isPSHUFDMask(PermMask.Val)) {
3665     if (V2.getOpcode() != ISD::UNDEF)
3666       return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3667                          DAG.getNode(ISD::UNDEF, VT), PermMask);
3668     return Op;
3669   }
3670
3671   if (!isMMX) {
3672     if (Subtarget->hasSSE2() &&
3673         (X86::isPSHUFDMask(PermMask.Val) ||
3674          X86::isPSHUFHWMask(PermMask.Val) ||
3675          X86::isPSHUFLWMask(PermMask.Val))) {
3676       MVT::ValueType RVT = VT;
3677       if (VT == MVT::v4f32) {
3678         RVT = MVT::v4i32;
3679         Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT,
3680                          DAG.getNode(ISD::BIT_CONVERT, RVT, V1),
3681                          DAG.getNode(ISD::UNDEF, RVT), PermMask);
3682       } else if (V2.getOpcode() != ISD::UNDEF)
3683         Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, V1,
3684                          DAG.getNode(ISD::UNDEF, RVT), PermMask);
3685       if (RVT != VT)
3686         Op = DAG.getNode(ISD::BIT_CONVERT, VT, Op);
3687       return Op;
3688     }
3689
3690     // Binary or unary shufps.
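    // (shufps takes its two low result lanes from the first operand and its
    // two high lanes from the second; when V2 is undef a PSHUFD-shaped mask
    // can be handled the same way by using V1 for both inputs.)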
3691     if (X86::isSHUFPMask(PermMask.Val) ||
3692         (V2.getOpcode() == ISD::UNDEF && X86::isPSHUFDMask(PermMask.Val)))
3693       return Op;
3694   }
3695
3696   // Handle v8i16 specifically since SSE can do word extraction and insertion.
3697   if (VT == MVT::v8i16) {
3698     SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
3699     if (NewOp.Val)
3700       return NewOp;
3701   }
3702
3703   // Handle all 4 wide cases with a number of shuffles.
3704   if (NumElems == 4 && !isMMX) {
3705     // Don't do this for MMX.
3706     MVT::ValueType MaskVT = PermMask.getValueType();
3707     MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3708     SmallVector<std::pair<int, int>, 8> Locs;
3709     Locs.reserve(NumElems);
3710     SmallVector<SDOperand, 8> Mask1(NumElems,
3711                                     DAG.getNode(ISD::UNDEF, MaskEVT));
3712     SmallVector<SDOperand, 8> Mask2(NumElems,
3713                                     DAG.getNode(ISD::UNDEF, MaskEVT));
3714     unsigned NumHi = 0;
3715     unsigned NumLo = 0;
3716     // If no more than two elements come from either vector, this can be
3717     // implemented with two shuffles. The first shuffle gathers the elements;
3718     // the second shuffle, which takes the first shuffle as both of its
3719     // vector operands, puts the elements into the right order.
3720     for (unsigned i = 0; i != NumElems; ++i) {
3721       SDOperand Elt = PermMask.getOperand(i);
3722       if (Elt.getOpcode() == ISD::UNDEF) {
3723         Locs[i] = std::make_pair(-1, -1);
3724       } else {
3725         unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3726         if (Val < NumElems) {
3727           Locs[i] = std::make_pair(0, NumLo);
3728           Mask1[NumLo] = Elt;
3729           NumLo++;
3730         } else {
3731           Locs[i] = std::make_pair(1, NumHi);
3732           if (2+NumHi < NumElems)
3733             Mask1[2+NumHi] = Elt;
3734           NumHi++;
3735         }
3736       }
3737     }
3738     if (NumLo <= 2 && NumHi <= 2) {
3739       V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3740                        DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3741                                    &Mask1[0], Mask1.size()));
3742       for (unsigned i = 0; i != NumElems; ++i) {
3743         if (Locs[i].first == -1)
3744           continue;
3745         else {
3746           unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3747           Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3748           Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3749         }
3750       }
3751
3752       return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
3753                          DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3754                                      &Mask2[0], Mask2.size()));
3755     }
3756
3757     // Break it into (shuffle shuffle_hi, shuffle_lo).
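    // More than two of the four results come from the same source vector, so
    // gather the elements destined for the low half and for the high half
    // with one V1/V2 shuffle each, then merge the two partial results with a
    // third shuffle.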
3758 Locs.clear(); 3759 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3760 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3761 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3762 unsigned MaskIdx = 0; 3763 unsigned LoIdx = 0; 3764 unsigned HiIdx = NumElems/2; 3765 for (unsigned i = 0; i != NumElems; ++i) { 3766 if (i == NumElems/2) { 3767 MaskPtr = &HiMask; 3768 MaskIdx = 1; 3769 LoIdx = 0; 3770 HiIdx = NumElems/2; 3771 } 3772 SDOperand Elt = PermMask.getOperand(i); 3773 if (Elt.getOpcode() == ISD::UNDEF) { 3774 Locs[i] = std::make_pair(-1, -1); 3775 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3776 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3777 (*MaskPtr)[LoIdx] = Elt; 3778 LoIdx++; 3779 } else { 3780 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3781 (*MaskPtr)[HiIdx] = Elt; 3782 HiIdx++; 3783 } 3784 } 3785 3786 SDOperand LoShuffle = 3787 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3788 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3789 &LoMask[0], LoMask.size())); 3790 SDOperand HiShuffle = 3791 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3792 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3793 &HiMask[0], HiMask.size())); 3794 SmallVector<SDOperand, 8> MaskOps; 3795 for (unsigned i = 0; i != NumElems; ++i) { 3796 if (Locs[i].first == -1) { 3797 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3798 } else { 3799 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3800 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3801 } 3802 } 3803 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3804 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3805 &MaskOps[0], MaskOps.size())); 3806 } 3807 3808 return SDOperand(); 3809} 3810 3811SDOperand 3812X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3813 SelectionDAG &DAG) { 3814 MVT::ValueType VT = Op.getValueType(); 3815 if (MVT::getSizeInBits(VT) == 8) { 3816 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3817 Op.getOperand(0), Op.getOperand(1)); 3818 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3819 DAG.getValueType(VT)); 3820 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3821 } else if (MVT::getSizeInBits(VT) == 16) { 3822 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3823 Op.getOperand(0), Op.getOperand(1)); 3824 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3825 DAG.getValueType(VT)); 3826 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3827 } else if (VT == MVT::f32) { 3828 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 3829 // the result back to FR32 register. It's only worth matching if the 3830 // result has a single use which is a store or a bitcast to i32. 
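    // For example, a lone (store (f32 (extract_vector_elt V, 2))) can usually
    // be matched as a single extractps that writes straight to memory, whereas
    // producing an f32 that stays in an XMM register is cheaper with a
    // shuffle.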
3831 if (!Op.hasOneUse()) 3832 return SDOperand(); 3833 SDNode *User = Op.Val->use_begin()->getUser(); 3834 if (User->getOpcode() != ISD::STORE && 3835 (User->getOpcode() != ISD::BIT_CONVERT || 3836 User->getValueType(0) != MVT::i32)) 3837 return SDOperand(); 3838 SDOperand Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3839 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Op.getOperand(0)), 3840 Op.getOperand(1)); 3841 return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Extract); 3842 } 3843 return SDOperand(); 3844} 3845 3846 3847SDOperand 3848X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3849 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3850 return SDOperand(); 3851 3852 if (Subtarget->hasSSE41()) { 3853 SDOperand Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 3854 if (Res.Val) 3855 return Res; 3856 } 3857 3858 MVT::ValueType VT = Op.getValueType(); 3859 // TODO: handle v16i8. 3860 if (MVT::getSizeInBits(VT) == 16) { 3861 SDOperand Vec = Op.getOperand(0); 3862 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3863 if (Idx == 0) 3864 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3865 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3866 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3867 Op.getOperand(1))); 3868 // Transform it so it match pextrw which produces a 32-bit result. 3869 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3870 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3871 Op.getOperand(0), Op.getOperand(1)); 3872 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3873 DAG.getValueType(VT)); 3874 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3875 } else if (MVT::getSizeInBits(VT) == 32) { 3876 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3877 if (Idx == 0) 3878 return Op; 3879 // SHUFPS the element to the lowest double word, then movss. 3880 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3881 SmallVector<SDOperand, 8> IdxVec; 3882 IdxVec. 3883 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3884 IdxVec. 3885 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3886 IdxVec. 3887 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3888 IdxVec. 3889 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3890 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3891 &IdxVec[0], IdxVec.size()); 3892 SDOperand Vec = Op.getOperand(0); 3893 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3894 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3895 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3896 DAG.getIntPtrConstant(0)); 3897 } else if (MVT::getSizeInBits(VT) == 64) { 3898 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 3899 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 3900 // to match extract_elt for f64. 3901 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3902 if (Idx == 0) 3903 return Op; 3904 3905 // UNPCKHPD the element to the lowest double word, then movsd. 3906 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3907 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3908 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3909 SmallVector<SDOperand, 8> IdxVec; 3910 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3911 IdxVec. 
    push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3913     SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3914                                  &IdxVec[0], IdxVec.size());
3915     SDOperand Vec = Op.getOperand(0);
3916     Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3917                       Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3918     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3919                        DAG.getIntPtrConstant(0));
3920   }
3921
3922   return SDOperand();
3923 }
3924
3925 SDOperand
3926 X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
3927   MVT::ValueType VT = Op.getValueType();
3928   MVT::ValueType EVT = MVT::getVectorElementType(VT);
3929
3930   SDOperand N0 = Op.getOperand(0);
3931   SDOperand N1 = Op.getOperand(1);
3932   SDOperand N2 = Op.getOperand(2);
3933
3934   if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
3935     unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
3936                                                   : X86ISD::PINSRW;
3937     // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
3938     // argument.
3939     if (N1.getValueType() != MVT::i32)
3940       N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
3941     if (N2.getValueType() != MVT::i32)
3942       N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
3943     return DAG.getNode(Opc, VT, N0, N1, N2);
3944   } else if (EVT == MVT::f32) {
3945     // Bits [7:6] of the constant are the source select. This will always be
3946     // zero here. The DAG Combiner may combine an extract_elt index into these
3947     // bits. For example (insert (extract, 3), 2) could be matched by putting
3948     // the '3' into bits [7:6] of X86ISD::INSERTPS.
3949     // Bits [5:4] of the constant are the destination select. This is the
3950     // value of the incoming immediate.
3951     // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
3952     // combine either a bitwise AND or an insert of float 0.0 to set these bits.
3953     N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4);
3954     return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
3955   }
3956   return SDOperand();
3957 }
3958
3959 SDOperand
3960 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3961   MVT::ValueType VT = Op.getValueType();
3962   MVT::ValueType EVT = MVT::getVectorElementType(VT);
3963
3964   if (Subtarget->hasSSE41())
3965     return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
3966
3967   if (EVT == MVT::i8)
3968     return SDOperand();
3969
3970   SDOperand N0 = Op.getOperand(0);
3971   SDOperand N1 = Op.getOperand(1);
3972   SDOperand N2 = Op.getOperand(2);
3973
3974   if (MVT::getSizeInBits(EVT) == 16) {
3975     // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
3976     // as its second argument.
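    // For example, (insert_vector_elt v8i16:$vec, i16:$val, 3) becomes
    //   (X86ISD::PINSRW $vec, (any_extend $val), 3)
    // which can then be selected as something like "pinsrw $3, %eax, %xmm0".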
3977 if (N1.getValueType() != MVT::i32) 3978 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3979 if (N2.getValueType() != MVT::i32) 3980 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3981 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3982 } 3983 return SDOperand(); 3984} 3985 3986SDOperand 3987X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3988 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3989 MVT::ValueType VT = MVT::v2i32; 3990 switch (Op.getValueType()) { 3991 default: break; 3992 case MVT::v16i8: 3993 case MVT::v8i16: 3994 VT = MVT::v4i32; 3995 break; 3996 } 3997 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), 3998 DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt)); 3999} 4000 4001// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 4002// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 4003// one of the above mentioned nodes. It has to be wrapped because otherwise 4004// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 4005// be used to form addressing mode. These wrapped nodes will be selected 4006// into MOV32ri. 4007SDOperand 4008X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 4009 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 4010 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 4011 getPointerTy(), 4012 CP->getAlignment()); 4013 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4014 // With PIC, the address is actually $g + Offset. 4015 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4016 !Subtarget->isPICStyleRIPRel()) { 4017 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4018 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4019 Result); 4020 } 4021 4022 return Result; 4023} 4024 4025SDOperand 4026X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 4027 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 4028 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 4029 // If it's a debug information descriptor, don't mess with it. 4030 if (DAG.isVerifiedDebugInfoDesc(Op)) 4031 return Result; 4032 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4033 // With PIC, the address is actually $g + Offset. 4034 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4035 !Subtarget->isPICStyleRIPRel()) { 4036 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4037 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4038 Result); 4039 } 4040 4041 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 4042 // load the value at address GV, not the value of GV itself. This means that 4043 // the GlobalAddress must be in the base or index register of the address, not 4044 // the GV offset field. 
Platform check is inside GVRequiresExtraLoad() call 4045 // The same applies for external symbols during PIC codegen 4046 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 4047 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 4048 PseudoSourceValue::getGOT(), 0); 4049 4050 return Result; 4051} 4052 4053// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 4054static SDOperand 4055LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4056 const MVT::ValueType PtrVT) { 4057 SDOperand InFlag; 4058 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 4059 DAG.getNode(X86ISD::GlobalBaseReg, 4060 PtrVT), InFlag); 4061 InFlag = Chain.getValue(1); 4062 4063 // emit leal symbol@TLSGD(,%ebx,1), %eax 4064 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4065 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4066 GA->getValueType(0), 4067 GA->getOffset()); 4068 SDOperand Ops[] = { Chain, TGA, InFlag }; 4069 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 4070 InFlag = Result.getValue(2); 4071 Chain = Result.getValue(1); 4072 4073 // call ___tls_get_addr. This function receives its argument in 4074 // the register EAX. 4075 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 4076 InFlag = Chain.getValue(1); 4077 4078 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4079 SDOperand Ops1[] = { Chain, 4080 DAG.getTargetExternalSymbol("___tls_get_addr", 4081 PtrVT), 4082 DAG.getRegister(X86::EAX, PtrVT), 4083 DAG.getRegister(X86::EBX, PtrVT), 4084 InFlag }; 4085 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 4086 InFlag = Chain.getValue(1); 4087 4088 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 4089} 4090 4091// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 4092static SDOperand 4093LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4094 const MVT::ValueType PtrVT) { 4095 SDOperand InFlag, Chain; 4096 4097 // emit leaq symbol@TLSGD(%rip), %rdi 4098 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4099 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4100 GA->getValueType(0), 4101 GA->getOffset()); 4102 SDOperand Ops[] = { DAG.getEntryNode(), TGA}; 4103 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 2); 4104 Chain = Result.getValue(1); 4105 InFlag = Result.getValue(2); 4106 4107 // call ___tls_get_addr. This function receives its argument in 4108 // the register RDI. 4109 Chain = DAG.getCopyToReg(Chain, X86::RDI, Result, InFlag); 4110 InFlag = Chain.getValue(1); 4111 4112 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4113 SDOperand Ops1[] = { Chain, 4114 DAG.getTargetExternalSymbol("___tls_get_addr", 4115 PtrVT), 4116 DAG.getRegister(X86::RDI, PtrVT), 4117 InFlag }; 4118 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 4); 4119 InFlag = Chain.getValue(1); 4120 4121 return DAG.getCopyFromReg(Chain, X86::RAX, PtrVT, InFlag); 4122} 4123 4124// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 4125// "local exec" model. 
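// Both models compute thread-pointer + offset; local exec folds the constant
// offset (x@ntpoff) directly, while initial exec first loads the variable's
// offset from the GOT (x@indntpoff), which is what the isDeclaration() check
// below selects between.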
4126static SDOperand 4127LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4128 const MVT::ValueType PtrVT) { 4129 // Get the Thread Pointer 4130 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4131 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4132 // exec) 4133 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4134 GA->getValueType(0), 4135 GA->getOffset()); 4136 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4137 4138 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4139 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 4140 PseudoSourceValue::getGOT(), 0); 4141 4142 // The address of the thread local variable is the add of the thread 4143 // pointer with the offset of the variable. 4144 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4145} 4146 4147SDOperand 4148X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4149 // TODO: implement the "local dynamic" model 4150 // TODO: implement the "initial exec"model for pic executables 4151 assert(Subtarget->isTargetELF() && 4152 "TLS not implemented for non-ELF targets"); 4153 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4154 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 4155 // otherwise use the "Local Exec"TLS Model 4156 if (Subtarget->is64Bit()) { 4157 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 4158 } else { 4159 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4160 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 4161 else 4162 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4163 } 4164} 4165 4166SDOperand 4167X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4168 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4169 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4170 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4171 // With PIC, the address is actually $g + Offset. 4172 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4173 !Subtarget->isPICStyleRIPRel()) { 4174 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4175 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4176 Result); 4177 } 4178 4179 return Result; 4180} 4181 4182SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4183 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4184 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4185 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4186 // With PIC, the address is actually $g + Offset. 4187 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4188 !Subtarget->isPICStyleRIPRel()) { 4189 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4190 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4191 Result); 4192 } 4193 4194 return Result; 4195} 4196 4197/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4198/// take a 2 x i32 value to shift plus a shift amount. 4199SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4200 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4201 MVT::ValueType VT = Op.getValueType(); 4202 unsigned VTBits = MVT::getSizeInBits(VT); 4203 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4204 SDOperand ShOpLo = Op.getOperand(0); 4205 SDOperand ShOpHi = Op.getOperand(1); 4206 SDOperand ShAmt = Op.getOperand(2); 4207 SDOperand Tmp1 = isSRA ? 
4208 DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : 4209 DAG.getConstant(0, VT); 4210 4211 SDOperand Tmp2, Tmp3; 4212 if (Op.getOpcode() == ISD::SHL_PARTS) { 4213 Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); 4214 Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); 4215 } else { 4216 Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt); 4217 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4218 } 4219 4220 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4221 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4222 DAG.getConstant(VTBits, MVT::i8)); 4223 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4224 AndNode, DAG.getConstant(0, MVT::i8)); 4225 4226 SDOperand Hi, Lo; 4227 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4228 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4229 SmallVector<SDOperand, 4> Ops; 4230 if (Op.getOpcode() == ISD::SHL_PARTS) { 4231 Ops.push_back(Tmp2); 4232 Ops.push_back(Tmp3); 4233 Ops.push_back(CC); 4234 Ops.push_back(Cond); 4235 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4236 4237 Ops.clear(); 4238 Ops.push_back(Tmp3); 4239 Ops.push_back(Tmp1); 4240 Ops.push_back(CC); 4241 Ops.push_back(Cond); 4242 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4243 } else { 4244 Ops.push_back(Tmp2); 4245 Ops.push_back(Tmp3); 4246 Ops.push_back(CC); 4247 Ops.push_back(Cond); 4248 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4249 4250 Ops.clear(); 4251 Ops.push_back(Tmp3); 4252 Ops.push_back(Tmp1); 4253 Ops.push_back(CC); 4254 Ops.push_back(Cond); 4255 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4256 } 4257 4258 VTs = DAG.getNodeValueTypes(VT, VT); 4259 Ops.clear(); 4260 Ops.push_back(Lo); 4261 Ops.push_back(Hi); 4262 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4263} 4264 4265SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4266 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4267 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4268 "Unknown SINT_TO_FP to lower!"); 4269 4270 // These are really Legal; caller falls through into that case. 4271 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4272 return SDOperand(); 4273 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4274 Subtarget->is64Bit()) 4275 return SDOperand(); 4276 4277 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4278 MachineFunction &MF = DAG.getMachineFunction(); 4279 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4280 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4281 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4282 StackSlot, 4283 PseudoSourceValue::getFixedStack(), 4284 SSFI); 4285 4286 // Build the FILD 4287 SDVTList Tys; 4288 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4289 if (useSSE) 4290 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4291 else 4292 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4293 SmallVector<SDOperand, 8> Ops; 4294 Ops.push_back(Chain); 4295 Ops.push_back(StackSlot); 4296 Ops.push_back(DAG.getValueType(SrcVT)); 4297 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4298 Tys, &Ops[0], Ops.size()); 4299 4300 if (useSSE) { 4301 Chain = Result.getValue(1); 4302 SDOperand InFlag = Result.getValue(2); 4303 4304 // FIXME: Currently the FST is flagged to the FILD_FLAG. 
This 4305 // shouldn't be necessary except that RFP cannot be live across 4306 // multiple blocks. When stackifier is fixed, they can be uncoupled. 4307 MachineFunction &MF = DAG.getMachineFunction(); 4308 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4309 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4310 Tys = DAG.getVTList(MVT::Other); 4311 SmallVector<SDOperand, 8> Ops; 4312 Ops.push_back(Chain); 4313 Ops.push_back(Result); 4314 Ops.push_back(StackSlot); 4315 Ops.push_back(DAG.getValueType(Op.getValueType())); 4316 Ops.push_back(InFlag); 4317 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4318 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4319 PseudoSourceValue::getFixedStack(), SSFI); 4320 } 4321 4322 return Result; 4323} 4324 4325std::pair<SDOperand,SDOperand> X86TargetLowering:: 4326FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4327 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4328 "Unknown FP_TO_SINT to lower!"); 4329 4330 // These are really Legal. 4331 if (Op.getValueType() == MVT::i32 && 4332 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4333 return std::make_pair(SDOperand(), SDOperand()); 4334 if (Subtarget->is64Bit() && 4335 Op.getValueType() == MVT::i64 && 4336 Op.getOperand(0).getValueType() != MVT::f80) 4337 return std::make_pair(SDOperand(), SDOperand()); 4338 4339 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4340 // stack slot. 4341 MachineFunction &MF = DAG.getMachineFunction(); 4342 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4343 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4344 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4345 unsigned Opc; 4346 switch (Op.getValueType()) { 4347 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4348 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4349 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4350 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4351 } 4352 4353 SDOperand Chain = DAG.getEntryNode(); 4354 SDOperand Value = Op.getOperand(0); 4355 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4356 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4357 Chain = DAG.getStore(Chain, Value, StackSlot, 4358 PseudoSourceValue::getFixedStack(), SSFI); 4359 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4360 SDOperand Ops[] = { 4361 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4362 }; 4363 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4364 Chain = Value.getValue(1); 4365 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4366 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4367 } 4368 4369 // Build the FP_TO_INT*_IN_MEM 4370 SDOperand Ops[] = { Chain, Value, StackSlot }; 4371 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4372 4373 return std::make_pair(FIST, StackSlot); 4374} 4375 4376SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4377 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4378 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4379 if (FIST.Val == 0) return SDOperand(); 4380 4381 // Load the result. 
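  // (The FP_TO_INT*_IN_MEM node only produces a chain; the integer result is
  // read back from the stack slot it wrote.)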
4382 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4383} 4384 4385SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4386 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4387 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4388 if (FIST.Val == 0) return 0; 4389 4390 // Return an i64 load from the stack slot. 4391 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4392 4393 // Use a MERGE_VALUES node to drop the chain result value. 4394 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4395} 4396 4397SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4398 MVT::ValueType VT = Op.getValueType(); 4399 MVT::ValueType EltVT = VT; 4400 if (MVT::isVector(VT)) 4401 EltVT = MVT::getVectorElementType(VT); 4402 std::vector<Constant*> CV; 4403 if (EltVT == MVT::f64) { 4404 Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))); 4405 CV.push_back(C); 4406 CV.push_back(C); 4407 } else { 4408 Constant *C = ConstantFP::get(APFloat(APInt(32, ~(1U << 31)))); 4409 CV.push_back(C); 4410 CV.push_back(C); 4411 CV.push_back(C); 4412 CV.push_back(C); 4413 } 4414 Constant *C = ConstantVector::get(CV); 4415 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4416 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4417 PseudoSourceValue::getConstantPool(), 0, 4418 false, 16); 4419 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4420} 4421 4422SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4423 MVT::ValueType VT = Op.getValueType(); 4424 MVT::ValueType EltVT = VT; 4425 unsigned EltNum = 1; 4426 if (MVT::isVector(VT)) { 4427 EltVT = MVT::getVectorElementType(VT); 4428 EltNum = MVT::getVectorNumElements(VT); 4429 } 4430 std::vector<Constant*> CV; 4431 if (EltVT == MVT::f64) { 4432 Constant *C = ConstantFP::get(APFloat(APInt(64, 1ULL << 63))); 4433 CV.push_back(C); 4434 CV.push_back(C); 4435 } else { 4436 Constant *C = ConstantFP::get(APFloat(APInt(32, 1U << 31))); 4437 CV.push_back(C); 4438 CV.push_back(C); 4439 CV.push_back(C); 4440 CV.push_back(C); 4441 } 4442 Constant *C = ConstantVector::get(CV); 4443 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4444 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4445 PseudoSourceValue::getConstantPool(), 0, 4446 false, 16); 4447 if (MVT::isVector(VT)) { 4448 return DAG.getNode(ISD::BIT_CONVERT, VT, 4449 DAG.getNode(ISD::XOR, MVT::v2i64, 4450 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4451 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4452 } else { 4453 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4454 } 4455} 4456 4457SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4458 SDOperand Op0 = Op.getOperand(0); 4459 SDOperand Op1 = Op.getOperand(1); 4460 MVT::ValueType VT = Op.getValueType(); 4461 MVT::ValueType SrcVT = Op1.getValueType(); 4462 4463 // If second operand is smaller, extend it first. 4464 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4465 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4466 SrcVT = VT; 4467 } 4468 // And if it is bigger, shrink it first. 4469 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4470 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4471 SrcVT = VT; 4472 } 4473 4474 // At this point the operands and the result should have the same 4475 // type, and that won't be f80 since that is not custom lowered. 
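  // What follows is the usual bit trick: copysign(x, y) is assembled as
  // (x & ~signmask) | (y & signmask) using FAND/FOR with masks loaded from the
  // constant pool.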
4476 4477 // First get the sign bit of second operand. 4478 std::vector<Constant*> CV; 4479 if (SrcVT == MVT::f64) { 4480 CV.push_back(ConstantFP::get(APFloat(APInt(64, 1ULL << 63)))); 4481 CV.push_back(ConstantFP::get(APFloat(APInt(64, 0)))); 4482 } else { 4483 CV.push_back(ConstantFP::get(APFloat(APInt(32, 1U << 31)))); 4484 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4485 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4486 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4487 } 4488 Constant *C = ConstantVector::get(CV); 4489 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4490 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4491 PseudoSourceValue::getConstantPool(), 0, 4492 false, 16); 4493 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4494 4495 // Shift sign bit right or left if the two operands have different types. 4496 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4497 // Op0 is MVT::f32, Op1 is MVT::f64. 4498 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4499 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4500 DAG.getConstant(32, MVT::i32)); 4501 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4502 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4503 DAG.getIntPtrConstant(0)); 4504 } 4505 4506 // Clear first operand sign bit. 4507 CV.clear(); 4508 if (VT == MVT::f64) { 4509 CV.push_back(ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))))); 4510 CV.push_back(ConstantFP::get(APFloat(APInt(64, 0)))); 4511 } else { 4512 CV.push_back(ConstantFP::get(APFloat(APInt(32, ~(1U << 31))))); 4513 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4514 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4515 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); 4516 } 4517 C = ConstantVector::get(CV); 4518 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4519 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4520 PseudoSourceValue::getConstantPool(), 0, 4521 false, 16); 4522 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4523 4524 // Or the value with the sign bit. 
4525 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4526} 4527 4528SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4529 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4530 SDOperand Cond; 4531 SDOperand Op0 = Op.getOperand(0); 4532 SDOperand Op1 = Op.getOperand(1); 4533 SDOperand CC = Op.getOperand(2); 4534 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4535 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4536 unsigned X86CC; 4537 4538 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4539 Op0, Op1, DAG)) { 4540 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4541 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4542 DAG.getConstant(X86CC, MVT::i8), Cond); 4543 } 4544 4545 assert(isFP && "Illegal integer SetCC!"); 4546 4547 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4548 switch (SetCCOpcode) { 4549 default: assert(false && "Illegal floating point SetCC!"); 4550 case ISD::SETOEQ: { // !PF & ZF 4551 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4552 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4553 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4554 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4555 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4556 } 4557 case ISD::SETUNE: { // PF | !ZF 4558 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4559 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4560 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4561 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4562 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4563 } 4564 } 4565} 4566 4567 4568SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4569 bool addTest = true; 4570 SDOperand Cond = Op.getOperand(0); 4571 SDOperand CC; 4572 4573 if (Cond.getOpcode() == ISD::SETCC) 4574 Cond = LowerSETCC(Cond, DAG); 4575 4576 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4577 // setting operand in place of the X86ISD::SETCC. 4578 if (Cond.getOpcode() == X86ISD::SETCC) { 4579 CC = Cond.getOperand(0); 4580 4581 SDOperand Cmp = Cond.getOperand(1); 4582 unsigned Opc = Cmp.getOpcode(); 4583 MVT::ValueType VT = Op.getValueType(); 4584 4585 bool IllegalFPCMov = false; 4586 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4587 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4588 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4589 4590 if ((Opc == X86ISD::CMP || 4591 Opc == X86ISD::COMI || 4592 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4593 Cond = Cmp; 4594 addTest = false; 4595 } 4596 } 4597 4598 if (addTest) { 4599 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4600 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4601 } 4602 4603 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4604 MVT::Flag); 4605 SmallVector<SDOperand, 4> Ops; 4606 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4607 // condition is true. 
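// The first value operand of X86ISD::CMOV is used when the condition is false
// and the second when it is true, so the SELECT's false value (operand 2) is
// pushed ahead of its true value (operand 1).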
4608 Ops.push_back(Op.getOperand(2)); 4609 Ops.push_back(Op.getOperand(1)); 4610 Ops.push_back(CC); 4611 Ops.push_back(Cond); 4612 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4613} 4614 4615SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4616 bool addTest = true; 4617 SDOperand Chain = Op.getOperand(0); 4618 SDOperand Cond = Op.getOperand(1); 4619 SDOperand Dest = Op.getOperand(2); 4620 SDOperand CC; 4621 4622 if (Cond.getOpcode() == ISD::SETCC) 4623 Cond = LowerSETCC(Cond, DAG); 4624 4625 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4626 // setting operand in place of the X86ISD::SETCC. 4627 if (Cond.getOpcode() == X86ISD::SETCC) { 4628 CC = Cond.getOperand(0); 4629 4630 SDOperand Cmp = Cond.getOperand(1); 4631 unsigned Opc = Cmp.getOpcode(); 4632 if (Opc == X86ISD::CMP || 4633 Opc == X86ISD::COMI || 4634 Opc == X86ISD::UCOMI) { 4635 Cond = Cmp; 4636 addTest = false; 4637 } 4638 } 4639 4640 if (addTest) { 4641 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4642 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4643 } 4644 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4645 Chain, Op.getOperand(2), CC, Cond); 4646} 4647 4648 4649// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4650// Calls to _alloca is needed to probe the stack when allocating more than 4k 4651// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4652// that the guard pages used by the OS virtual memory manager are allocated in 4653// correct sequence. 4654SDOperand 4655X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4656 SelectionDAG &DAG) { 4657 assert(Subtarget->isTargetCygMing() && 4658 "This should be used only on Cygwin/Mingw targets"); 4659 4660 // Get the inputs. 4661 SDOperand Chain = Op.getOperand(0); 4662 SDOperand Size = Op.getOperand(1); 4663 // FIXME: Ensure alignment here 4664 4665 SDOperand Flag; 4666 4667 MVT::ValueType IntPtr = getPointerTy(); 4668 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 4669 4670 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4671 Flag = Chain.getValue(1); 4672 4673 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4674 SDOperand Ops[] = { Chain, 4675 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4676 DAG.getRegister(X86::EAX, IntPtr), 4677 Flag }; 4678 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4679 Flag = Chain.getValue(1); 4680 4681 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4682 4683 std::vector<MVT::ValueType> Tys; 4684 Tys.push_back(SPTy); 4685 Tys.push_back(MVT::Other); 4686 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4687 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4688} 4689 4690SDOperand 4691X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, 4692 SDOperand Chain, 4693 SDOperand Dst, SDOperand Src, 4694 SDOperand Size, unsigned Align, 4695 const Value *DstSV, uint64_t DstSVOff) { 4696 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4697 4698 /// If not DWORD aligned or size is more than the threshold, call the library. 4699 /// The libc version is likely to be faster for these cases. It can use the 4700 /// address value and run time information about the CPU. 
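/// Otherwise the value is broadcast into AL/AX/EAX/RAX and stored with a
/// rep;stos of the widest aligned element type; for example, a 4-byte-aligned
/// memset of 40 zero bytes on a 32-bit target becomes EAX = 0, ECX = 10,
/// EDI = dst, rep;stosd, with no trailing bytes left over.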
4701 if ((Align & 3) != 0 || 4702 !ConstantSize || 4703 ConstantSize->getValue() > getSubtarget()->getMaxInlineSizeThreshold()) { 4704 SDOperand InFlag(0, 0); 4705 4706 // Check to see if there is a specialized entry-point for memory zeroing. 4707 ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); 4708 if (const char *bzeroEntry = 4709 V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { 4710 MVT::ValueType IntPtr = getPointerTy(); 4711 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4712 TargetLowering::ArgListTy Args; 4713 TargetLowering::ArgListEntry Entry; 4714 Entry.Node = Dst; 4715 Entry.Ty = IntPtrTy; 4716 Args.push_back(Entry); 4717 Entry.Node = Size; 4718 Args.push_back(Entry); 4719 std::pair<SDOperand,SDOperand> CallResult = 4720 LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, 4721 false, DAG.getExternalSymbol(bzeroEntry, IntPtr), 4722 Args, DAG); 4723 return CallResult.second; 4724 } 4725 4726 // Otherwise have the target-independent code call memset. 4727 return SDOperand(); 4728 } 4729 4730 uint64_t SizeVal = ConstantSize->getValue(); 4731 SDOperand InFlag(0, 0); 4732 MVT::ValueType AVT; 4733 SDOperand Count; 4734 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); 4735 unsigned BytesLeft = 0; 4736 bool TwoRepStos = false; 4737 if (ValC) { 4738 unsigned ValReg; 4739 uint64_t Val = ValC->getValue() & 255; 4740 4741 // If the value is a constant, then we can potentially use larger sets. 4742 switch (Align & 3) { 4743 case 2: // WORD aligned 4744 AVT = MVT::i16; 4745 ValReg = X86::AX; 4746 Val = (Val << 8) | Val; 4747 break; 4748 case 0: // DWORD aligned 4749 AVT = MVT::i32; 4750 ValReg = X86::EAX; 4751 Val = (Val << 8) | Val; 4752 Val = (Val << 16) | Val; 4753 if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned 4754 AVT = MVT::i64; 4755 ValReg = X86::RAX; 4756 Val = (Val << 32) | Val; 4757 } 4758 break; 4759 default: // Byte aligned 4760 AVT = MVT::i8; 4761 ValReg = X86::AL; 4762 Count = DAG.getIntPtrConstant(SizeVal); 4763 break; 4764 } 4765 4766 if (AVT > MVT::i8) { 4767 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4768 Count = DAG.getIntPtrConstant(SizeVal / UBytes); 4769 BytesLeft = SizeVal % UBytes; 4770 } 4771 4772 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4773 InFlag); 4774 InFlag = Chain.getValue(1); 4775 } else { 4776 AVT = MVT::i8; 4777 Count = DAG.getIntPtrConstant(SizeVal); 4778 Chain = DAG.getCopyToReg(Chain, X86::AL, Src, InFlag); 4779 InFlag = Chain.getValue(1); 4780 } 4781 4782 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4783 Count, InFlag); 4784 InFlag = Chain.getValue(1); 4785 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4786 Dst, InFlag); 4787 InFlag = Chain.getValue(1); 4788 4789 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4790 SmallVector<SDOperand, 8> Ops; 4791 Ops.push_back(Chain); 4792 Ops.push_back(DAG.getValueType(AVT)); 4793 Ops.push_back(InFlag); 4794 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4795 4796 if (TwoRepStos) { 4797 InFlag = Chain.getValue(1); 4798 Count = Size; 4799 MVT::ValueType CVT = Count.getValueType(); 4800 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4801 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4802 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4803 Left, InFlag); 4804 InFlag = Chain.getValue(1); 4805 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4806 Ops.clear(); 4807 Ops.push_back(Chain); 4808 Ops.push_back(DAG.getValueType(MVT::i8)); 4809 Ops.push_back(InFlag); 4810 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4811 } else if (BytesLeft) { 4812 // Handle the last 1 - 7 bytes. 4813 unsigned Offset = SizeVal - BytesLeft; 4814 MVT::ValueType AddrVT = Dst.getValueType(); 4815 MVT::ValueType SizeVT = Size.getValueType(); 4816 4817 Chain = DAG.getMemset(Chain, 4818 DAG.getNode(ISD::ADD, AddrVT, Dst, 4819 DAG.getConstant(Offset, AddrVT)), 4820 Src, 4821 DAG.getConstant(BytesLeft, SizeVT), 4822 Align, DstSV, DstSVOff + Offset); 4823 } 4824 4825 // TODO: Use a TokenFactor, as in memcpy, instead of a single chain. 4826 return Chain; 4827} 4828 4829SDOperand 4830X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, 4831 SDOperand Chain, 4832 SDOperand Dst, SDOperand Src, 4833 SDOperand Size, unsigned Align, 4834 bool AlwaysInline, 4835 const Value *DstSV, uint64_t DstSVOff, 4836 const Value *SrcSV, uint64_t SrcSVOff){ 4837 4838 // This requires the copy size to be a constant, preferably 4839 // within a subtarget-specific limit. 4840 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4841 if (!ConstantSize) 4842 return SDOperand(); 4843 uint64_t SizeVal = ConstantSize->getValue(); 4844 if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) 4845 return SDOperand(); 4846 4847 MVT::ValueType AVT; 4848 unsigned BytesLeft = 0; 4849 if (Align >= 8 && Subtarget->is64Bit()) 4850 AVT = MVT::i64; 4851 else if (Align >= 4) 4852 AVT = MVT::i32; 4853 else if (Align >= 2) 4854 AVT = MVT::i16; 4855 else 4856 AVT = MVT::i8; 4857 4858 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4859 unsigned CountVal = SizeVal / UBytes; 4860 SDOperand Count = DAG.getIntPtrConstant(CountVal); 4861 BytesLeft = SizeVal % UBytes; 4862 4863 SDOperand InFlag(0, 0); 4864 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4865 Count, InFlag); 4866 InFlag = Chain.getValue(1); 4867 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4868 Dst, InFlag); 4869 InFlag = Chain.getValue(1); 4870 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4871 Src, InFlag); 4872 InFlag = Chain.getValue(1); 4873 4874 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4875 SmallVector<SDOperand, 8> Ops; 4876 Ops.push_back(Chain); 4877 Ops.push_back(DAG.getValueType(AVT)); 4878 Ops.push_back(InFlag); 4879 SDOperand RepMovs = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4880 4881 SmallVector<SDOperand, 4> Results; 4882 Results.push_back(RepMovs); 4883 if (BytesLeft) { 4884 // Handle the last 1 - 7 bytes.
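// These leftover bytes are copied with a second, smaller memcpy at
// Dst+Offset and Src+Offset, and its chain is token-factored with the
// rep;movs chain below.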
4885 unsigned Offset = SizeVal - BytesLeft; 4886 MVT::ValueType DstVT = Dst.getValueType(); 4887 MVT::ValueType SrcVT = Src.getValueType(); 4888 MVT::ValueType SizeVT = Size.getValueType(); 4889 Results.push_back(DAG.getMemcpy(Chain, 4890 DAG.getNode(ISD::ADD, DstVT, Dst, 4891 DAG.getConstant(Offset, DstVT)), 4892 DAG.getNode(ISD::ADD, SrcVT, Src, 4893 DAG.getConstant(Offset, SrcVT)), 4894 DAG.getConstant(BytesLeft, SizeVT), 4895 Align, AlwaysInline, 4896 DstSV, DstSVOff + Offset, 4897 SrcSV, SrcSVOff + Offset)); 4898 } 4899 4900 return DAG.getNode(ISD::TokenFactor, MVT::Other, &Results[0], Results.size()); 4901} 4902 4903/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4904SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4905 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4906 SDOperand TheChain = N->getOperand(0); 4907 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4908 if (Subtarget->is64Bit()) { 4909 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4910 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4911 MVT::i64, rax.getValue(2)); 4912 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4913 DAG.getConstant(32, MVT::i8)); 4914 SDOperand Ops[] = { 4915 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4916 }; 4917 4918 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4919 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4920 } 4921 4922 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4923 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4924 MVT::i32, eax.getValue(2)); 4925 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4926 SDOperand Ops[] = { eax, edx }; 4927 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4928 4929 // Use a MERGE_VALUES to return the value and chain. 4930 Ops[1] = edx.getValue(1); 4931 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4932 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4933} 4934 4935SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4936 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4937 4938 if (!Subtarget->is64Bit()) { 4939 // vastart just stores the address of the VarArgsFrameIndex slot into the 4940 // memory location argument. 4941 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4942 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4943 } 4944 4945 // __va_list_tag: 4946 // gp_offset (0 - 6 * 8) 4947 // fp_offset (48 - 48 + 8 * 16) 4948 // overflow_arg_area (point to parameters coming in memory). 4949 // reg_save_area 4950 SmallVector<SDOperand, 8> MemOps; 4951 SDOperand FIN = Op.getOperand(1); 4952 // Store gp_offset 4953 SDOperand Store = DAG.getStore(Op.getOperand(0), 4954 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4955 FIN, SV, 0); 4956 MemOps.push_back(Store); 4957 4958 // Store fp_offset 4959 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4960 Store = DAG.getStore(Op.getOperand(0), 4961 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4962 FIN, SV, 0); 4963 MemOps.push_back(Store); 4964 4965 // Store ptr to overflow_arg_area 4966 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4967 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4968 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4969 MemOps.push_back(Store); 4970 4971 // Store ptr to reg_save_area. 
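// (Field offsets in the x86-64 __va_list_tag: gp_offset at 0, fp_offset at 4,
// overflow_arg_area at 8, reg_save_area at 16, hence the 4, 4 and 8 byte
// increments of FIN.)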
4972 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4973 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4974 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4975 MemOps.push_back(Store); 4976 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4977} 4978 4979SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4980 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4981 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 4982 SDOperand Chain = Op.getOperand(0); 4983 SDOperand DstPtr = Op.getOperand(1); 4984 SDOperand SrcPtr = Op.getOperand(2); 4985 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4986 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4987 4988 return DAG.getMemcpy(Chain, DstPtr, SrcPtr, 4989 DAG.getIntPtrConstant(24), 8, false, 4990 DstSV, 0, SrcSV, 0); 4991} 4992 4993SDOperand 4994X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4995 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4996 switch (IntNo) { 4997 default: return SDOperand(); // Don't custom lower most intrinsics. 4998 // Comparison intrinsics. 4999 case Intrinsic::x86_sse_comieq_ss: 5000 case Intrinsic::x86_sse_comilt_ss: 5001 case Intrinsic::x86_sse_comile_ss: 5002 case Intrinsic::x86_sse_comigt_ss: 5003 case Intrinsic::x86_sse_comige_ss: 5004 case Intrinsic::x86_sse_comineq_ss: 5005 case Intrinsic::x86_sse_ucomieq_ss: 5006 case Intrinsic::x86_sse_ucomilt_ss: 5007 case Intrinsic::x86_sse_ucomile_ss: 5008 case Intrinsic::x86_sse_ucomigt_ss: 5009 case Intrinsic::x86_sse_ucomige_ss: 5010 case Intrinsic::x86_sse_ucomineq_ss: 5011 case Intrinsic::x86_sse2_comieq_sd: 5012 case Intrinsic::x86_sse2_comilt_sd: 5013 case Intrinsic::x86_sse2_comile_sd: 5014 case Intrinsic::x86_sse2_comigt_sd: 5015 case Intrinsic::x86_sse2_comige_sd: 5016 case Intrinsic::x86_sse2_comineq_sd: 5017 case Intrinsic::x86_sse2_ucomieq_sd: 5018 case Intrinsic::x86_sse2_ucomilt_sd: 5019 case Intrinsic::x86_sse2_ucomile_sd: 5020 case Intrinsic::x86_sse2_ucomigt_sd: 5021 case Intrinsic::x86_sse2_ucomige_sd: 5022 case Intrinsic::x86_sse2_ucomineq_sd: { 5023 unsigned Opc = 0; 5024 ISD::CondCode CC = ISD::SETCC_INVALID; 5025 switch (IntNo) { 5026 default: break; 5027 case Intrinsic::x86_sse_comieq_ss: 5028 case Intrinsic::x86_sse2_comieq_sd: 5029 Opc = X86ISD::COMI; 5030 CC = ISD::SETEQ; 5031 break; 5032 case Intrinsic::x86_sse_comilt_ss: 5033 case Intrinsic::x86_sse2_comilt_sd: 5034 Opc = X86ISD::COMI; 5035 CC = ISD::SETLT; 5036 break; 5037 case Intrinsic::x86_sse_comile_ss: 5038 case Intrinsic::x86_sse2_comile_sd: 5039 Opc = X86ISD::COMI; 5040 CC = ISD::SETLE; 5041 break; 5042 case Intrinsic::x86_sse_comigt_ss: 5043 case Intrinsic::x86_sse2_comigt_sd: 5044 Opc = X86ISD::COMI; 5045 CC = ISD::SETGT; 5046 break; 5047 case Intrinsic::x86_sse_comige_ss: 5048 case Intrinsic::x86_sse2_comige_sd: 5049 Opc = X86ISD::COMI; 5050 CC = ISD::SETGE; 5051 break; 5052 case Intrinsic::x86_sse_comineq_ss: 5053 case Intrinsic::x86_sse2_comineq_sd: 5054 Opc = X86ISD::COMI; 5055 CC = ISD::SETNE; 5056 break; 5057 case Intrinsic::x86_sse_ucomieq_ss: 5058 case Intrinsic::x86_sse2_ucomieq_sd: 5059 Opc = X86ISD::UCOMI; 5060 CC = ISD::SETEQ; 5061 break; 5062 case Intrinsic::x86_sse_ucomilt_ss: 5063 case Intrinsic::x86_sse2_ucomilt_sd: 5064 Opc = X86ISD::UCOMI; 5065 CC = ISD::SETLT; 5066 break; 5067 case Intrinsic::x86_sse_ucomile_ss: 5068 case 
Intrinsic::x86_sse2_ucomile_sd: 5069 Opc = X86ISD::UCOMI; 5070 CC = ISD::SETLE; 5071 break; 5072 case Intrinsic::x86_sse_ucomigt_ss: 5073 case Intrinsic::x86_sse2_ucomigt_sd: 5074 Opc = X86ISD::UCOMI; 5075 CC = ISD::SETGT; 5076 break; 5077 case Intrinsic::x86_sse_ucomige_ss: 5078 case Intrinsic::x86_sse2_ucomige_sd: 5079 Opc = X86ISD::UCOMI; 5080 CC = ISD::SETGE; 5081 break; 5082 case Intrinsic::x86_sse_ucomineq_ss: 5083 case Intrinsic::x86_sse2_ucomineq_sd: 5084 Opc = X86ISD::UCOMI; 5085 CC = ISD::SETNE; 5086 break; 5087 } 5088 5089 unsigned X86CC; 5090 SDOperand LHS = Op.getOperand(1); 5091 SDOperand RHS = Op.getOperand(2); 5092 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5093 5094 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5095 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5096 DAG.getConstant(X86CC, MVT::i8), Cond); 5097 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5098 } 5099 5100 // Fix vector shift instructions where the last operand is a non-immediate 5101 // i32 value. 5102 case Intrinsic::x86_sse2_pslli_w: 5103 case Intrinsic::x86_sse2_pslli_d: 5104 case Intrinsic::x86_sse2_pslli_q: 5105 case Intrinsic::x86_sse2_psrli_w: 5106 case Intrinsic::x86_sse2_psrli_d: 5107 case Intrinsic::x86_sse2_psrli_q: 5108 case Intrinsic::x86_sse2_psrai_w: 5109 case Intrinsic::x86_sse2_psrai_d: 5110 case Intrinsic::x86_mmx_pslli_w: 5111 case Intrinsic::x86_mmx_pslli_d: 5112 case Intrinsic::x86_mmx_pslli_q: 5113 case Intrinsic::x86_mmx_psrli_w: 5114 case Intrinsic::x86_mmx_psrli_d: 5115 case Intrinsic::x86_mmx_psrli_q: 5116 case Intrinsic::x86_mmx_psrai_w: 5117 case Intrinsic::x86_mmx_psrai_d: { 5118 SDOperand ShAmt = Op.getOperand(2); 5119 if (isa<ConstantSDNode>(ShAmt)) 5120 return SDOperand(); 5121 5122 unsigned NewIntNo = 0; 5123 MVT::ValueType ShAmtVT = MVT::v4i32; 5124 switch (IntNo) { 5125 case Intrinsic::x86_sse2_pslli_w: 5126 NewIntNo = Intrinsic::x86_sse2_psll_w; 5127 break; 5128 case Intrinsic::x86_sse2_pslli_d: 5129 NewIntNo = Intrinsic::x86_sse2_psll_d; 5130 break; 5131 case Intrinsic::x86_sse2_pslli_q: 5132 NewIntNo = Intrinsic::x86_sse2_psll_q; 5133 break; 5134 case Intrinsic::x86_sse2_psrli_w: 5135 NewIntNo = Intrinsic::x86_sse2_psrl_w; 5136 break; 5137 case Intrinsic::x86_sse2_psrli_d: 5138 NewIntNo = Intrinsic::x86_sse2_psrl_d; 5139 break; 5140 case Intrinsic::x86_sse2_psrli_q: 5141 NewIntNo = Intrinsic::x86_sse2_psrl_q; 5142 break; 5143 case Intrinsic::x86_sse2_psrai_w: 5144 NewIntNo = Intrinsic::x86_sse2_psra_w; 5145 break; 5146 case Intrinsic::x86_sse2_psrai_d: 5147 NewIntNo = Intrinsic::x86_sse2_psra_d; 5148 break; 5149 default: { 5150 ShAmtVT = MVT::v2i32; 5151 switch (IntNo) { 5152 case Intrinsic::x86_mmx_pslli_w: 5153 NewIntNo = Intrinsic::x86_mmx_psll_w; 5154 break; 5155 case Intrinsic::x86_mmx_pslli_d: 5156 NewIntNo = Intrinsic::x86_mmx_psll_d; 5157 break; 5158 case Intrinsic::x86_mmx_pslli_q: 5159 NewIntNo = Intrinsic::x86_mmx_psll_q; 5160 break; 5161 case Intrinsic::x86_mmx_psrli_w: 5162 NewIntNo = Intrinsic::x86_mmx_psrl_w; 5163 break; 5164 case Intrinsic::x86_mmx_psrli_d: 5165 NewIntNo = Intrinsic::x86_mmx_psrl_d; 5166 break; 5167 case Intrinsic::x86_mmx_psrli_q: 5168 NewIntNo = Intrinsic::x86_mmx_psrl_q; 5169 break; 5170 case Intrinsic::x86_mmx_psrai_w: 5171 NewIntNo = Intrinsic::x86_mmx_psra_w; 5172 break; 5173 case Intrinsic::x86_mmx_psrai_d: 5174 NewIntNo = Intrinsic::x86_mmx_psra_d; 5175 break; 5176 default: abort(); // Can't reach here. 
5177 } 5178 break; 5179 } 5180 } 5181 MVT::ValueType VT = Op.getValueType(); 5182 ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT, 5183 DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt)); 5184 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT, 5185 DAG.getConstant(NewIntNo, MVT::i32), 5186 Op.getOperand(1), ShAmt); 5187 } 5188 } 5189} 5190 5191SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5192 // Depths > 0 not supported yet! 5193 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5194 return SDOperand(); 5195 5196 // Just load the return address 5197 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5198 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5199} 5200 5201SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5202 // Depths > 0 not supported yet! 5203 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5204 return SDOperand(); 5205 5206 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5207 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5208 DAG.getIntPtrConstant(4)); 5209} 5210 5211SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5212 SelectionDAG &DAG) { 5213 // Is not yet supported on x86-64 5214 if (Subtarget->is64Bit()) 5215 return SDOperand(); 5216 5217 return DAG.getIntPtrConstant(8); 5218} 5219 5220SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5221{ 5222 assert(!Subtarget->is64Bit() && 5223 "Lowering of eh_return builtin is not supported yet on x86-64"); 5224 5225 MachineFunction &MF = DAG.getMachineFunction(); 5226 SDOperand Chain = Op.getOperand(0); 5227 SDOperand Offset = Op.getOperand(1); 5228 SDOperand Handler = Op.getOperand(2); 5229 5230 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5231 getPointerTy()); 5232 5233 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5234 DAG.getIntPtrConstant(-4UL)); 5235 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5236 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5237 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5238 MF.getRegInfo().addLiveOut(X86::ECX); 5239 5240 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5241 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5242} 5243 5244SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5245 SelectionDAG &DAG) { 5246 SDOperand Root = Op.getOperand(0); 5247 SDOperand Trmp = Op.getOperand(1); // trampoline 5248 SDOperand FPtr = Op.getOperand(2); // nested function 5249 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5250 5251 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5252 5253 const X86InstrInfo *TII = 5254 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5255 5256 if (Subtarget->is64Bit()) { 5257 SDOperand OutChains[6]; 5258 5259 // Large code-model. 5260 5261 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5262 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5263 5264 const unsigned char N86R10 = 5265 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5266 const unsigned char N86R11 = 5267 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5268 5269 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5270 5271 // Load the pointer to the nested function into R11. 
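// (Taken together, the six stores below assemble the 23-byte sequence
// movabsq $fptr, %r11; movabsq $nest, %r10; jmpq *%r11 at the trampoline
// address.)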
5272 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5273 SDOperand Addr = Trmp; 5274 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5275 TrmpAddr, 0); 5276 5277 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5278 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5279 5280 // Load the 'nest' parameter value into R10. 5281 // R10 is specified in X86CallingConv.td 5282 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5283 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5284 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5285 TrmpAddr, 10); 5286 5287 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5288 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5289 5290 // Jump to the nested function. 5291 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5292 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5293 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5294 TrmpAddr, 20); 5295 5296 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5297 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5298 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5299 TrmpAddr, 22); 5300 5301 SDOperand Ops[] = 5302 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5303 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5304 } else { 5305 const Function *Func = 5306 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5307 unsigned CC = Func->getCallingConv(); 5308 unsigned NestReg; 5309 5310 switch (CC) { 5311 default: 5312 assert(0 && "Unsupported calling convention"); 5313 case CallingConv::C: 5314 case CallingConv::X86_StdCall: { 5315 // Pass 'nest' parameter in ECX. 5316 // Must be kept in sync with X86CallingConv.td 5317 NestReg = X86::ECX; 5318 5319 // Check that ECX wasn't needed by an 'inreg' parameter. 5320 const FunctionType *FTy = Func->getFunctionType(); 5321 const PAListPtr &Attrs = Func->getParamAttrs(); 5322 5323 if (!Attrs.isEmpty() && !Func->isVarArg()) { 5324 unsigned InRegCount = 0; 5325 unsigned Idx = 1; 5326 5327 for (FunctionType::param_iterator I = FTy->param_begin(), 5328 E = FTy->param_end(); I != E; ++I, ++Idx) 5329 if (Attrs.paramHasAttr(Idx, ParamAttr::InReg)) 5330 // FIXME: should only count parameters that are lowered to integers. 5331 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5332 5333 if (InRegCount > 2) { 5334 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5335 abort(); 5336 } 5337 } 5338 break; 5339 } 5340 case CallingConv::X86_FastCall: 5341 // Pass 'nest' parameter in EAX. 
5342 // Must be kept in sync with X86CallingConv.td 5343 NestReg = X86::EAX; 5344 break; 5345 } 5346 5347 SDOperand OutChains[4]; 5348 SDOperand Addr, Disp; 5349 5350 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5351 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5352 5353 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5354 const unsigned char N86Reg = 5355 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg); 5356 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5357 Trmp, TrmpAddr, 0); 5358 5359 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5360 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1); 5361 5362 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5363 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5364 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5365 TrmpAddr, 5, false, 1); 5366 5367 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5368 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); 5369 5370 SDOperand Ops[] = 5371 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5372 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5373 } 5374} 5375 5376SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 5377 /* 5378 The rounding mode is in bits 11:10 of FPSR, and has the following 5379 settings: 5380 00 Round to nearest 5381 01 Round to -inf 5382 10 Round to +inf 5383 11 Round to 0 5384 5385 FLT_ROUNDS, on the other hand, expects the following: 5386 -1 Undefined 5387 0 Round to 0 5388 1 Round to nearest 5389 2 Round to +inf 5390 3 Round to -inf 5391 5392 To perform the conversion, we do: 5393 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5394 */ 5395 5396 MachineFunction &MF = DAG.getMachineFunction(); 5397 const TargetMachine &TM = MF.getTarget(); 5398 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5399 unsigned StackAlignment = TFI.getStackAlignment(); 5400 MVT::ValueType VT = Op.getValueType(); 5401 5402 // Save FP Control Word to stack slot 5403 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5404 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5405 5406 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5407 DAG.getEntryNode(), StackSlot); 5408 5409 // Load FP Control Word from stack slot 5410 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5411 5412 // Transform as necessary 5413 SDOperand CWD1 = 5414 DAG.getNode(ISD::SRL, MVT::i16, 5415 DAG.getNode(ISD::AND, MVT::i16, 5416 CWD, DAG.getConstant(0x800, MVT::i16)), 5417 DAG.getConstant(11, MVT::i8)); 5418 SDOperand CWD2 = 5419 DAG.getNode(ISD::SRL, MVT::i16, 5420 DAG.getNode(ISD::AND, MVT::i16, 5421 CWD, DAG.getConstant(0x400, MVT::i16)), 5422 DAG.getConstant(9, MVT::i8)); 5423 5424 SDOperand RetVal = 5425 DAG.getNode(ISD::AND, MVT::i16, 5426 DAG.getNode(ISD::ADD, MVT::i16, 5427 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5428 DAG.getConstant(1, MVT::i16)), 5429 DAG.getConstant(3, MVT::i16)); 5430 5431 5432 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 
5433 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5434} 5435 5436SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5437 MVT::ValueType VT = Op.getValueType(); 5438 MVT::ValueType OpVT = VT; 5439 unsigned NumBits = MVT::getSizeInBits(VT); 5440 5441 Op = Op.getOperand(0); 5442 if (VT == MVT::i8) { 5443 // Zero extend to i32 since there is not an i8 bsr. 5444 OpVT = MVT::i32; 5445 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5446 } 5447 5448 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5449 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5450 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5451 5452 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5453 SmallVector<SDOperand, 4> Ops; 5454 Ops.push_back(Op); 5455 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5456 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5457 Ops.push_back(Op.getValue(1)); 5458 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5459 5460 // Finally xor with NumBits-1. 5461 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5462 5463 if (VT == MVT::i8) 5464 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5465 return Op; 5466} 5467 5468SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5469 MVT::ValueType VT = Op.getValueType(); 5470 MVT::ValueType OpVT = VT; 5471 unsigned NumBits = MVT::getSizeInBits(VT); 5472 5473 Op = Op.getOperand(0); 5474 if (VT == MVT::i8) { 5475 OpVT = MVT::i32; 5476 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5477 } 5478 5479 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5480 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5481 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5482 5483 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5484 SmallVector<SDOperand, 4> Ops; 5485 Ops.push_back(Op); 5486 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5487 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5488 Ops.push_back(Op.getValue(1)); 5489 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5490 5491 if (VT == MVT::i8) 5492 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5493 return Op; 5494} 5495 5496SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5497 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5498 unsigned Reg = 0; 5499 unsigned size = 0; 5500 switch(T) { 5501 case MVT::i8: Reg = X86::AL; size = 1; break; 5502 case MVT::i16: Reg = X86::AX; size = 2; break; 5503 case MVT::i32: Reg = X86::EAX; size = 4; break; 5504 case MVT::i64: 5505 if (Subtarget->is64Bit()) { 5506 Reg = X86::RAX; size = 8; 5507 } else //Should go away when LowerType stuff lands 5508 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5509 break; 5510 }; 5511 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5512 Op.getOperand(3), SDOperand()); 5513 SDOperand Ops[] = { cpIn.getValue(0), 5514 Op.getOperand(1), 5515 Op.getOperand(2), 5516 DAG.getTargetConstant(size, MVT::i8), 5517 cpIn.getValue(1) }; 5518 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5519 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5520 SDOperand cpOut = 5521 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5522 return cpOut; 5523} 5524 5525SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5526 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5527 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5528 SDOperand cpInL, cpInH; 5529 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5530 DAG.getConstant(0, 
MVT::i32)); 5531 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5532 DAG.getConstant(1, MVT::i32)); 5533 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5534 cpInL, SDOperand()); 5535 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5536 cpInH, cpInL.getValue(1)); 5537 SDOperand swapInL, swapInH; 5538 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5539 DAG.getConstant(0, MVT::i32)); 5540 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5541 DAG.getConstant(1, MVT::i32)); 5542 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5543 swapInL, cpInH.getValue(1)); 5544 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5545 swapInH, swapInL.getValue(1)); 5546 SDOperand Ops[] = { swapInH.getValue(0), 5547 Op->getOperand(1), 5548 swapInH.getValue(1)}; 5549 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5550 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5551 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5552 Result.getValue(1)); 5553 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5554 cpOutL.getValue(2)); 5555 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5556 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5557 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5558 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5559} 5560 5561SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) { 5562 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5563 assert (T == MVT::i32 && "Only know how to expand i32 LSS"); 5564 SDOperand negOp = DAG.getNode(ISD::SUB, T, 5565 DAG.getConstant(0, T), Op->getOperand(2)); 5566 return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0), 5567 Op->getOperand(1), negOp, T).Val; 5568} 5569 5570/// LowerOperation - Provide custom lowering hooks for some operations. 
5571/// 5572SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5573 switch (Op.getOpcode()) { 5574 default: assert(0 && "Should not custom lower this!"); 5575 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5576 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5577 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5578 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5579 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5580 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5581 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5582 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5583 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5584 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5585 case ISD::SHL_PARTS: 5586 case ISD::SRA_PARTS: 5587 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5588 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5589 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5590 case ISD::FABS: return LowerFABS(Op, DAG); 5591 case ISD::FNEG: return LowerFNEG(Op, DAG); 5592 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5593 case ISD::SETCC: return LowerSETCC(Op, DAG); 5594 case ISD::SELECT: return LowerSELECT(Op, DAG); 5595 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5596 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5597 case ISD::CALL: return LowerCALL(Op, DAG); 5598 case ISD::RET: return LowerRET(Op, DAG); 5599 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5600 case ISD::VASTART: return LowerVASTART(Op, DAG); 5601 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5602 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5603 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5604 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5605 case ISD::FRAME_TO_ARGS_OFFSET: 5606 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5607 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5608 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5609 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5610 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5611 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5612 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5613 5614 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5615 case ISD::READCYCLECOUNTER: 5616 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5617 } 5618} 5619 5620/// ExpandOperation - Provide custom lowering hooks for expanding operations. 
5621SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5622 switch (N->getOpcode()) { 5623 default: assert(0 && "Should not custom lower this!"); 5624 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5625 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5626 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5627 case ISD::ATOMIC_LSS: return ExpandATOMIC_LSS(N,DAG); 5628 } 5629} 5630 5631const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5632 switch (Opcode) { 5633 default: return NULL; 5634 case X86ISD::BSF: return "X86ISD::BSF"; 5635 case X86ISD::BSR: return "X86ISD::BSR"; 5636 case X86ISD::SHLD: return "X86ISD::SHLD"; 5637 case X86ISD::SHRD: return "X86ISD::SHRD"; 5638 case X86ISD::FAND: return "X86ISD::FAND"; 5639 case X86ISD::FOR: return "X86ISD::FOR"; 5640 case X86ISD::FXOR: return "X86ISD::FXOR"; 5641 case X86ISD::FSRL: return "X86ISD::FSRL"; 5642 case X86ISD::FILD: return "X86ISD::FILD"; 5643 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5644 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5645 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5646 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5647 case X86ISD::FLD: return "X86ISD::FLD"; 5648 case X86ISD::FST: return "X86ISD::FST"; 5649 case X86ISD::CALL: return "X86ISD::CALL"; 5650 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5651 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5652 case X86ISD::CMP: return "X86ISD::CMP"; 5653 case X86ISD::COMI: return "X86ISD::COMI"; 5654 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5655 case X86ISD::SETCC: return "X86ISD::SETCC"; 5656 case X86ISD::CMOV: return "X86ISD::CMOV"; 5657 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5658 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5659 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5660 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5661 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5662 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5663 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5664 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5665 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5666 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5667 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5668 case X86ISD::FMAX: return "X86ISD::FMAX"; 5669 case X86ISD::FMIN: return "X86ISD::FMIN"; 5670 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5671 case X86ISD::FRCP: return "X86ISD::FRCP"; 5672 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5673 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5674 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5675 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5676 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5677 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 5678 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 5679 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 5680 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 5681 } 5682} 5683 5684// isLegalAddressingMode - Return true if the addressing mode represented 5685// by AM is legal for this target, for a load/store of the specified type. 5686bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5687 const Type *Ty) const { 5688 // X86 supports extremely general addressing modes. 5689 5690 // X86 allows a sign-extended 32-bit immediate field as a displacement. 
5691 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5692 return false; 5693 5694 if (AM.BaseGV) { 5695 // We can only fold this if we don't need an extra load. 5696 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5697 return false; 5698 5699 // X86-64 only supports addr of globals in small code model. 5700 if (Subtarget->is64Bit()) { 5701 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5702 return false; 5703 // If lower 4G is not available, then we must use rip-relative addressing. 5704 if (AM.BaseOffs || AM.Scale > 1) 5705 return false; 5706 } 5707 } 5708 5709 switch (AM.Scale) { 5710 case 0: 5711 case 1: 5712 case 2: 5713 case 4: 5714 case 8: 5715 // These scales always work. 5716 break; 5717 case 3: 5718 case 5: 5719 case 9: 5720 // These scales are formed with basereg+scalereg. Only accept if there is 5721 // no basereg yet. 5722 if (AM.HasBaseReg) 5723 return false; 5724 break; 5725 default: // Other stuff never works. 5726 return false; 5727 } 5728 5729 return true; 5730} 5731 5732 5733bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5734 if (!Ty1->isInteger() || !Ty2->isInteger()) 5735 return false; 5736 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5737 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5738 if (NumBits1 <= NumBits2) 5739 return false; 5740 return Subtarget->is64Bit() || NumBits1 < 64; 5741} 5742 5743bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5744 MVT::ValueType VT2) const { 5745 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5746 return false; 5747 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5748 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5749 if (NumBits1 <= NumBits2) 5750 return false; 5751 return Subtarget->is64Bit() || NumBits1 < 64; 5752} 5753 5754/// isShuffleMaskLegal - Targets can use this to indicate that they only 5755/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5756/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5757/// are assumed to be legal. 5758bool 5759X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5760 // Only do shuffles on 128-bit vector types for now. 5761 if (MVT::getSizeInBits(VT) == 64) return false; 5762 return (Mask.Val->getNumOperands() <= 4 || 5763 isIdentityMask(Mask.Val) || 5764 isIdentityMask(Mask.Val, true) || 5765 isSplatMask(Mask.Val) || 5766 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5767 X86::isUNPCKLMask(Mask.Val) || 5768 X86::isUNPCKHMask(Mask.Val) || 5769 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5770 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5771} 5772 5773bool 5774X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, 5775 MVT::ValueType EVT, 5776 SelectionDAG &DAG) const { 5777 unsigned NumElts = BVOps.size(); 5778 // Only do shuffles on 128-bit vector types for now. 
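// (A total size of 64 bits would be an MMX vector, which is rejected here
// just as in isShuffleMaskLegal above.)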
5779 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5780 if (NumElts == 2) return true; 5781 if (NumElts == 4) { 5782 return (isMOVLMask(&BVOps[0], 4) || 5783 isCommutedMOVL(&BVOps[0], 4, true) || 5784 isSHUFPMask(&BVOps[0], 4) || 5785 isCommutedSHUFP(&BVOps[0], 4)); 5786 } 5787 return false; 5788} 5789 5790//===----------------------------------------------------------------------===// 5791// X86 Scheduler Hooks 5792//===----------------------------------------------------------------------===// 5793 5794// private utility function 5795MachineBasicBlock * 5796X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 5797 MachineBasicBlock *MBB, 5798 unsigned regOpc, 5799 unsigned immOpc) { 5800 // For the atomic bitwise operator, we generate 5801 // thisMBB: 5802 // newMBB: 5803 // ld t1 = [bitinstr.addr] 5804 // op t2 = t1, [bitinstr.val] 5805 // mov EAX = t1 5806 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 5807 // bz newMBB 5808 // fallthrough -->nextMBB 5809 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5810 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 5811 ilist<MachineBasicBlock>::iterator MBBIter = MBB; 5812 ++MBBIter; 5813 5814 /// First build the CFG 5815 MachineFunction *F = MBB->getParent(); 5816 MachineBasicBlock *thisMBB = MBB; 5817 MachineBasicBlock *newMBB = new MachineBasicBlock(LLVM_BB); 5818 MachineBasicBlock *nextMBB = new MachineBasicBlock(LLVM_BB); 5819 F->getBasicBlockList().insert(MBBIter, newMBB); 5820 F->getBasicBlockList().insert(MBBIter, nextMBB); 5821 5822 // Move all successors to thisMBB to nextMBB 5823 nextMBB->transferSuccessors(thisMBB); 5824 5825 // Update thisMBB to fall through to newMBB 5826 thisMBB->addSuccessor(newMBB); 5827 5828 // newMBB jumps to itself and fall through to nextMBB 5829 newMBB->addSuccessor(nextMBB); 5830 newMBB->addSuccessor(newMBB); 5831 5832 // Insert instructions into newMBB based on incoming instruction 5833 assert(bInstr->getNumOperands() < 8 && "unexpected number of operands"); 5834 MachineOperand& destOper = bInstr->getOperand(0); 5835 MachineOperand* argOpers[6]; 5836 int numArgs = bInstr->getNumOperands() - 1; 5837 for (int i=0; i < numArgs; ++i) 5838 argOpers[i] = &bInstr->getOperand(i+1); 5839 5840 // x86 address has 4 operands: base, index, scale, and displacement 5841 int lastAddrIndx = 3; // [0,3] 5842 int valArgIndx = 4; 5843 5844 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5845 MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), t1); 5846 for (int i=0; i <= lastAddrIndx; ++i) 5847 (*MIB).addOperand(*argOpers[i]); 5848 5849 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5850 assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm()) 5851 && "invalid operand"); 5852 if (argOpers[valArgIndx]->isReg()) 5853 MIB = BuildMI(newMBB, TII->get(regOpc), t2); 5854 else 5855 MIB = BuildMI(newMBB, TII->get(immOpc), t2); 5856 MIB.addReg(t1); 5857 (*MIB).addOperand(*argOpers[valArgIndx]); 5858 5859 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), X86::EAX); 5860 MIB.addReg(t1); 5861 5862 MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG32)); 5863 for (int i=0; i <= lastAddrIndx; ++i) 5864 (*MIB).addOperand(*argOpers[i]); 5865 MIB.addReg(t2); 5866 5867 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), destOper.getReg()); 5868 MIB.addReg(X86::EAX); 5869 5870 // insert branch 5871 BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB); 5872 5873 delete bInstr; // The pseudo instruction is gone now. 
5874 return nextMBB; 5875} 5876 5877// private utility function 5878MachineBasicBlock * 5879X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, 5880 MachineBasicBlock *MBB, 5881 unsigned cmovOpc) { 5882 // For the atomic min/max operator, we generate 5883 // thisMBB: 5884 // newMBB: 5885 // ld t1 = [min/max.addr] 5886 // mov t2 = [min/max.val] 5887 // cmp t1, t2 5888 // cmov[cond] t2 = t1 5889 // mov EAX = t1 5890 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 5891 // bz newMBB 5892 // fallthrough -->nextMBB 5893 // 5894 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5895 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 5896 ilist<MachineBasicBlock>::iterator MBBIter = MBB; 5897 ++MBBIter; 5898 5899 /// First build the CFG 5900 MachineFunction *F = MBB->getParent(); 5901 MachineBasicBlock *thisMBB = MBB; 5902 MachineBasicBlock *newMBB = new MachineBasicBlock(LLVM_BB); 5903 MachineBasicBlock *nextMBB = new MachineBasicBlock(LLVM_BB); 5904 F->getBasicBlockList().insert(MBBIter, newMBB); 5905 F->getBasicBlockList().insert(MBBIter, nextMBB); 5906 5907 // Move all successors of thisMBB to nextMBB 5908 nextMBB->transferSuccessors(thisMBB); 5909 5910 // Update thisMBB to fall through to newMBB 5911 thisMBB->addSuccessor(newMBB); 5912 5913 // newMBB jumps to itself and falls through to nextMBB 5914 newMBB->addSuccessor(nextMBB); 5915 newMBB->addSuccessor(newMBB); 5916 5917 // Insert instructions into newMBB based on incoming instruction 5918 assert(mInstr->getNumOperands() < 8 && "unexpected number of operands"); 5919 MachineOperand& destOper = mInstr->getOperand(0); 5920 MachineOperand* argOpers[6]; 5921 int numArgs = mInstr->getNumOperands() - 1; 5922 for (int i=0; i < numArgs; ++i) 5923 argOpers[i] = &mInstr->getOperand(i+1); 5924 5925 // x86 address has 4 operands: base, index, scale, and displacement 5926 int lastAddrIndx = 3; // [0,3] 5927 int valArgIndx = 4; 5928 5929 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5930 MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), t1); 5931 for (int i=0; i <= lastAddrIndx; ++i) 5932 (*MIB).addOperand(*argOpers[i]); 5933 5934 // We only support register and immediate values 5935 assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm()) 5936 && "invalid operand"); 5937 5938 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5939 if (argOpers[valArgIndx]->isReg()) 5940 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2); 5941 else 5942 MIB = BuildMI(newMBB, TII->get(X86::MOV32ri), t2); 5943 (*MIB).addOperand(*argOpers[valArgIndx]); 5944 5945 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), X86::EAX); 5946 MIB.addReg(t1); 5947 5948 MIB = BuildMI(newMBB, TII->get(X86::CMP32rr)); 5949 MIB.addReg(t1); 5950 MIB.addReg(t2); 5951 5952 // Generate cmov 5953 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5954 MIB = BuildMI(newMBB, TII->get(cmovOpc),t3); 5955 MIB.addReg(t2); 5956 MIB.addReg(t1); 5957 5958 // Cmp and exchange if none has modified the memory location 5959 MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG32)); 5960 for (int i=0; i <= lastAddrIndx; ++i) 5961 (*MIB).addOperand(*argOpers[i]); 5962 MIB.addReg(t3); 5963 5964 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), destOper.getReg()); 5965 MIB.addReg(X86::EAX); 5966 5967 // insert branch 5968 BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB); 5969 5970 delete mInstr; // The pseudo instruction is gone now.
5971 return nextMBB; 5972} 5973 5974 5975MachineBasicBlock * 5976X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5977 MachineBasicBlock *BB) { 5978 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5979 switch (MI->getOpcode()) { 5980 default: assert(false && "Unexpected instr type to insert"); 5981 case X86::CMOV_FR32: 5982 case X86::CMOV_FR64: 5983 case X86::CMOV_V4F32: 5984 case X86::CMOV_V2F64: 5985 case X86::CMOV_V2I64: { 5986 // To "insert" a SELECT_CC instruction, we actually have to insert the 5987 // diamond control-flow pattern. The incoming instruction knows the 5988 // destination vreg to set, the condition code register to branch on, the 5989 // true/false values to select between, and a branch opcode to use. 5990 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5991 ilist<MachineBasicBlock>::iterator It = BB; 5992 ++It; 5993 5994 // thisMBB: 5995 // ... 5996 // TrueVal = ... 5997 // cmpTY ccX, r1, r2 5998 // bCC copy1MBB 5999 // fallthrough --> copy0MBB 6000 MachineBasicBlock *thisMBB = BB; 6001 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 6002 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 6003 unsigned Opc = 6004 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 6005 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 6006 MachineFunction *F = BB->getParent(); 6007 F->getBasicBlockList().insert(It, copy0MBB); 6008 F->getBasicBlockList().insert(It, sinkMBB); 6009 // Update machine-CFG edges by transferring all successors of the current 6010 // block to the new block which will contain the Phi node for the select. 6011 sinkMBB->transferSuccessors(BB); 6012 6013 // Add the true and fallthrough blocks as its successors. 6014 BB->addSuccessor(copy0MBB); 6015 BB->addSuccessor(sinkMBB); 6016 6017 // copy0MBB: 6018 // %FalseValue = ... 6019 // # fallthrough to sinkMBB 6020 BB = copy0MBB; 6021 6022 // Update machine-CFG edges 6023 BB->addSuccessor(sinkMBB); 6024 6025 // sinkMBB: 6026 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6027 // ... 6028 BB = sinkMBB; 6029 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 6030 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6031 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6032 6033 delete MI; // The pseudo instruction is gone now. 6034 return BB; 6035 } 6036 6037 case X86::FP32_TO_INT16_IN_MEM: 6038 case X86::FP32_TO_INT32_IN_MEM: 6039 case X86::FP32_TO_INT64_IN_MEM: 6040 case X86::FP64_TO_INT16_IN_MEM: 6041 case X86::FP64_TO_INT32_IN_MEM: 6042 case X86::FP64_TO_INT64_IN_MEM: 6043 case X86::FP80_TO_INT16_IN_MEM: 6044 case X86::FP80_TO_INT32_IN_MEM: 6045 case X86::FP80_TO_INT64_IN_MEM: { 6046 // Change the floating point control register to use "round towards zero" 6047 // mode when truncating to an integer value. 6048 MachineFunction *F = BB->getParent(); 6049 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 6050 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 6051 6052 // Load the old value of the high byte of the control word... 6053 unsigned OldCW = 6054 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 6055 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 6056 6057 // Set the high part to be round to zero... 6058 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 6059 .addImm(0xC7F); 6060 6061 // Reload the modified control word now... 
6062 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 6063 6064 // Restore the memory image of control word to original value 6065 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 6066 .addReg(OldCW); 6067 6068 // Get the X86 opcode to use. 6069 unsigned Opc; 6070 switch (MI->getOpcode()) { 6071 default: assert(0 && "illegal opcode!"); 6072 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 6073 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 6074 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 6075 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 6076 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 6077 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 6078 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 6079 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 6080 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 6081 } 6082 6083 X86AddressMode AM; 6084 MachineOperand &Op = MI->getOperand(0); 6085 if (Op.isRegister()) { 6086 AM.BaseType = X86AddressMode::RegBase; 6087 AM.Base.Reg = Op.getReg(); 6088 } else { 6089 AM.BaseType = X86AddressMode::FrameIndexBase; 6090 AM.Base.FrameIndex = Op.getIndex(); 6091 } 6092 Op = MI->getOperand(1); 6093 if (Op.isImmediate()) 6094 AM.Scale = Op.getImm(); 6095 Op = MI->getOperand(2); 6096 if (Op.isImmediate()) 6097 AM.IndexReg = Op.getImm(); 6098 Op = MI->getOperand(3); 6099 if (Op.isGlobalAddress()) { 6100 AM.GV = Op.getGlobal(); 6101 } else { 6102 AM.Disp = Op.getImm(); 6103 } 6104 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 6105 .addReg(MI->getOperand(4).getReg()); 6106 6107 // Reload the original control word now. 6108 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 6109 6110 delete MI; // The pseudo instruction is gone now. 6111 return BB; 6112 } 6113 case X86::ATOMAND32: 6114 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 6115 X86::AND32ri); 6116 case X86::ATOMOR32: 6117 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 6118 X86::OR32ri); 6119 case X86::ATOMXOR32: 6120 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 6121 X86::XOR32ri); 6122 case X86::ATOMMIN32: 6123 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 6124 case X86::ATOMMAX32: 6125 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 6126 case X86::ATOMUMIN32: 6127 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 6128 case X86::ATOMUMAX32: 6129 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 6130 } 6131} 6132 6133//===----------------------------------------------------------------------===// 6134// X86 Optimization Hooks 6135//===----------------------------------------------------------------------===// 6136 6137void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 6138 const APInt &Mask, 6139 APInt &KnownZero, 6140 APInt &KnownOne, 6141 const SelectionDAG &DAG, 6142 unsigned Depth) const { 6143 unsigned Opc = Op.getOpcode(); 6144 assert((Opc >= ISD::BUILTIN_OP_END || 6145 Opc == ISD::INTRINSIC_WO_CHAIN || 6146 Opc == ISD::INTRINSIC_W_CHAIN || 6147 Opc == ISD::INTRINSIC_VOID) && 6148 "Should use MaskedValueIsZero if you don't know whether Op" 6149 " is a target node!"); 6150 6151 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 
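  // X86ISD::SETCC materializes its result as 0 or 1 in an i8 register, so all
  // bits above bit 0 are known to be zero; that is what the case below records.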
6152 switch (Opc) { 6153 default: break; 6154 case X86ISD::SETCC: 6155 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 6156 Mask.getBitWidth() - 1); 6157 break; 6158 } 6159} 6160 6161/// getShuffleScalarElt - Returns the scalar element that will make up the ith 6162/// element of the result of the vector shuffle. 6163static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 6164 MVT::ValueType VT = N->getValueType(0); 6165 SDOperand PermMask = N->getOperand(2); 6166 unsigned NumElems = PermMask.getNumOperands(); 6167 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 6168 i %= NumElems; 6169 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 6170 return (i == 0) 6171 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 6172 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 6173 SDOperand Idx = PermMask.getOperand(i); 6174 if (Idx.getOpcode() == ISD::UNDEF) 6175 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 6176 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 6177 } 6178 return SDOperand(); 6179} 6180 6181/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 6182/// node is a GlobalAddress + an offset. 6183static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 6184 unsigned Opc = N->getOpcode(); 6185 if (Opc == X86ISD::Wrapper) { 6186 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 6187 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 6188 return true; 6189 } 6190 } else if (Opc == ISD::ADD) { 6191 SDOperand N1 = N->getOperand(0); 6192 SDOperand N2 = N->getOperand(1); 6193 if (isGAPlusOffset(N1.Val, GA, Offset)) { 6194 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 6195 if (V) { 6196 Offset += V->getSignExtended(); 6197 return true; 6198 } 6199 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 6200 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 6201 if (V) { 6202 Offset += V->getSignExtended(); 6203 return true; 6204 } 6205 } 6206 } 6207 return false; 6208} 6209 6210/// isConsecutiveLoad - Returns true if N is loading from an address of Base 6211/// + Dist * Size. 
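/// For example, if Base is a 4-byte load from frame-object offset 16, a 4-byte
/// load from offset 24 satisfies Dist == 2 with Size == 4 (16 + 2*4 == 24).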
6212static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 6213 MachineFrameInfo *MFI) { 6214 if (N->getOperand(0).Val != Base->getOperand(0).Val) 6215 return false; 6216 6217 SDOperand Loc = N->getOperand(1); 6218 SDOperand BaseLoc = Base->getOperand(1); 6219 if (Loc.getOpcode() == ISD::FrameIndex) { 6220 if (BaseLoc.getOpcode() != ISD::FrameIndex) 6221 return false; 6222 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 6223 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 6224 int FS = MFI->getObjectSize(FI); 6225 int BFS = MFI->getObjectSize(BFI); 6226 if (FS != BFS || FS != Size) return false; 6227 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 6228 } else { 6229 GlobalValue *GV1 = NULL; 6230 GlobalValue *GV2 = NULL; 6231 int64_t Offset1 = 0; 6232 int64_t Offset2 = 0; 6233 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 6234 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 6235 if (isGA1 && isGA2 && GV1 == GV2) 6236 return Offset1 == (Offset2 + Dist*Size); 6237 } 6238 6239 return false; 6240} 6241 6242static bool isBaseAlignmentOfN(unsigned N, SDNode *Base, MachineFrameInfo *MFI, 6243 const X86Subtarget *Subtarget) { 6244 GlobalValue *GV; 6245 int64_t Offset = 0; 6246 if (isGAPlusOffset(Base, GV, Offset)) 6247 return (GV->getAlignment() >= N && (Offset % N) == 0); 6248 // DAG combine handles the stack object case. 6249 return false; 6250} 6251 6252static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, 6253 unsigned NumElems, MVT::ValueType EVT, 6254 MachineFrameInfo *MFI, 6255 SelectionDAG &DAG, SDNode *&Base) { 6256 Base = NULL; 6257 for (unsigned i = 0; i < NumElems; ++i) { 6258 SDOperand Idx = PermMask.getOperand(i); 6259 if (Idx.getOpcode() == ISD::UNDEF) { 6260 if (!Base) 6261 return false; 6262 continue; 6263 } 6264 6265 unsigned Index = cast<ConstantSDNode>(Idx)->getValue(); 6266 SDOperand Elt = getShuffleScalarElt(N, Index, DAG); 6267 if (!Elt.Val || 6268 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.Val))) 6269 return false; 6270 if (!Base) { 6271 Base = Elt.Val; 6272 continue; 6273 } 6274 if (Elt.getOpcode() == ISD::UNDEF) 6275 continue; 6276 6277 if (!isConsecutiveLoad(Elt.Val, Base, i, MVT::getSizeInBits(EVT)/8,MFI)) 6278 return false; 6279 } 6280 return true; 6281} 6282 6283/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 6284/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 6285/// if the load addresses are consecutive, non-overlapping, and in the right 6286/// order. 
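/// For example, a v4f32 shuffle whose elements are 4-byte loads from A, A+4,
/// A+8 and A+12 with mask <0, 1, 2, 3> is rewritten as a single 16-byte load
/// from A (keeping the first load's alignment unless the base is known to be
/// 16-byte aligned).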
6287static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 6288 const X86Subtarget *Subtarget) { 6289 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6290 MVT::ValueType VT = N->getValueType(0); 6291 MVT::ValueType EVT = MVT::getVectorElementType(VT); 6292 SDOperand PermMask = N->getOperand(2); 6293 unsigned NumElems = PermMask.getNumOperands(); 6294 SDNode *Base = NULL; 6295 if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, MFI, DAG, Base)) 6296 return SDOperand(); 6297 6298 LoadSDNode *LD = cast<LoadSDNode>(Base); 6299 if (isBaseAlignmentOfN(16, Base->getOperand(1).Val, MFI, Subtarget)) 6300 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6301 LD->getSrcValueOffset(), LD->isVolatile()); 6302 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6303 LD->getSrcValueOffset(), LD->isVolatile(), 6304 LD->getAlignment()); 6305} 6306 6307static SDNode *getBuildPairElt(SDNode *N, unsigned i) { 6308 SDOperand Elt = N->getOperand(i); 6309 if (Elt.getOpcode() != ISD::MERGE_VALUES) 6310 return Elt.Val; 6311 return Elt.getOperand(Elt.ResNo).Val; 6312} 6313 6314static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, 6315 const X86Subtarget *Subtarget) { 6316 // Ignore single operand BUILD_VECTOR. 6317 if (N->getNumOperands() == 1) 6318 return SDOperand(); 6319 6320 MVT::ValueType VT = N->getValueType(0); 6321 MVT::ValueType EVT = MVT::getVectorElementType(VT); 6322 if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit()) 6323 // We are looking for load i64 and zero extend. We want to transform 6324 // it before legalizer has a chance to expand it. Also look for i64 6325 // BUILD_PAIR bit casted to f64. 6326 return SDOperand(); 6327 // This must be an insertion into a zero vector. 6328 SDOperand HighElt = N->getOperand(1); 6329 if (!isZeroNode(HighElt)) 6330 return SDOperand(); 6331 6332 // Value must be a load. 6333 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6334 SDNode *Base = N->getOperand(0).Val; 6335 if (!isa<LoadSDNode>(Base)) { 6336 if (Base->getOpcode() == ISD::BIT_CONVERT) 6337 Base = Base->getOperand(0).Val; 6338 if (Base->getOpcode() != ISD::BUILD_PAIR) 6339 return SDOperand(); 6340 SDNode *Pair = Base; 6341 Base = getBuildPairElt(Pair, 0); 6342 if (!ISD::isNON_EXTLoad(Base)) 6343 return SDOperand(); 6344 SDNode *NextLD = getBuildPairElt(Pair, 1); 6345 if (!ISD::isNON_EXTLoad(NextLD) || 6346 !isConsecutiveLoad(NextLD, Base, 1, 4/*32 bits*/, MFI)) 6347 return SDOperand(); 6348 } 6349 LoadSDNode *LD = cast<LoadSDNode>(Base); 6350 6351 // Transform it into VZEXT_LOAD addr. 6352 return DAG.getNode(X86ISD::VZEXT_LOAD, VT, LD->getChain(), LD->getBasePtr()); 6353} 6354 6355/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 6356static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 6357 const X86Subtarget *Subtarget) { 6358 SDOperand Cond = N->getOperand(0); 6359 6360 // If we have SSE[12] support, try to form min/max nodes. 6361 if (Subtarget->hasSSE2() && 6362 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 6363 if (Cond.getOpcode() == ISD::SETCC) { 6364 // Get the LHS/RHS of the select. 
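      // For example, select (setolt %a, %b), %a, %b maps to X86ISD::FMIN
      // below, i.e. a single minss/minsd.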
6365 SDOperand LHS = N->getOperand(1); 6366 SDOperand RHS = N->getOperand(2); 6367 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 6368 6369 unsigned Opcode = 0; 6370 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 6371 switch (CC) { 6372 default: break; 6373 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 6374 case ISD::SETULE: 6375 case ISD::SETLE: 6376 if (!UnsafeFPMath) break; 6377 // FALL THROUGH. 6378 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 6379 case ISD::SETLT: 6380 Opcode = X86ISD::FMIN; 6381 break; 6382 6383 case ISD::SETOGT: // (X > Y) ? X : Y -> max 6384 case ISD::SETUGT: 6385 case ISD::SETGT: 6386 if (!UnsafeFPMath) break; 6387 // FALL THROUGH. 6388 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 6389 case ISD::SETGE: 6390 Opcode = X86ISD::FMAX; 6391 break; 6392 } 6393 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 6394 switch (CC) { 6395 default: break; 6396 case ISD::SETOGT: // (X > Y) ? Y : X -> min 6397 case ISD::SETUGT: 6398 case ISD::SETGT: 6399 if (!UnsafeFPMath) break; 6400 // FALL THROUGH. 6401 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 6402 case ISD::SETGE: 6403 Opcode = X86ISD::FMIN; 6404 break; 6405 6406 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 6407 case ISD::SETULE: 6408 case ISD::SETLE: 6409 if (!UnsafeFPMath) break; 6410 // FALL THROUGH. 6411 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 6412 case ISD::SETLT: 6413 Opcode = X86ISD::FMAX; 6414 break; 6415 } 6416 } 6417 6418 if (Opcode) 6419 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 6420 } 6421 6422 } 6423 6424 return SDOperand(); 6425} 6426 6427/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 6428static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 6429 const X86Subtarget *Subtarget) { 6430 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 6431 // the FP state in cases where an emms may be missing. 6432 // A preferable solution to the general problem is to figure out the right 6433 // places to insert EMMS. This qualifies as a quick hack. 6434 StoreSDNode *St = cast<StoreSDNode>(N); 6435 if (MVT::isVector(St->getValue().getValueType()) && 6436 MVT::getSizeInBits(St->getValue().getValueType()) == 64 && 6437 isa<LoadSDNode>(St->getValue()) && 6438 !cast<LoadSDNode>(St->getValue())->isVolatile() && 6439 St->getChain().hasOneUse() && !St->isVolatile()) { 6440 SDNode* LdVal = St->getValue().Val; 6441 LoadSDNode *Ld = 0; 6442 int TokenFactorIndex = -1; 6443 SmallVector<SDOperand, 8> Ops; 6444 SDNode* ChainVal = St->getChain().Val; 6445 // Must be a store of a load. We currently handle two cases: the load 6446 // is a direct child, and it's under an intervening TokenFactor. It is 6447 // possible to dig deeper under nested TokenFactors. 6448 if (ChainVal == LdVal) 6449 Ld = cast<LoadSDNode>(St->getChain()); 6450 else if (St->getValue().hasOneUse() && 6451 ChainVal->getOpcode() == ISD::TokenFactor) { 6452 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 6453 if (ChainVal->getOperand(i).Val == LdVal) { 6454 TokenFactorIndex = i; 6455 Ld = cast<LoadSDNode>(St->getValue()); 6456 } else 6457 Ops.push_back(ChainVal->getOperand(i)); 6458 } 6459 } 6460 if (Ld) { 6461 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 
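      // (On x86-64 the 64-bit value can travel through a GR64 register, e.g.
      // movq (%rsi), %rax; movq %rax, (%rdi), so no MMX state is touched.)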
6462      if (Subtarget->is64Bit()) {
6463        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
6464                                      Ld->getBasePtr(), Ld->getSrcValue(),
6465                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
6466                                      Ld->getAlignment());
6467        SDOperand NewChain = NewLd.getValue(1);
6468        if (TokenFactorIndex != -1) {
6469          Ops.push_back(NewChain);
6470          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
6471                                 Ops.size());
6472        }
6473        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
6474                            St->getSrcValue(), St->getSrcValueOffset(),
6475                            St->isVolatile(), St->getAlignment());
6476      }
6477
6478      // Otherwise, lower to two 32-bit copies.
6479      SDOperand LoAddr = Ld->getBasePtr();
6480      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
6481                                     DAG.getConstant(4, MVT::i32));
6482
6483      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
6484                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
6485                                   Ld->isVolatile(), Ld->getAlignment());
6486      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
6487                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
6488                                   Ld->isVolatile(),
6489                                   MinAlign(Ld->getAlignment(), 4));
6490
6491      SDOperand NewChain = LoLd.getValue(1);
6492      if (TokenFactorIndex != -1) {
6493        Ops.push_back(LoLd);
6494        Ops.push_back(HiLd);
6495        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
6496                               Ops.size());
6497      }
6498
6499      LoAddr = St->getBasePtr();
6500      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
6501                           DAG.getConstant(4, MVT::i32));
6502
6503      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
6504                                    St->getSrcValue(), St->getSrcValueOffset(),
6505                                    St->isVolatile(), St->getAlignment());
6506      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
6507                                    St->getSrcValue(), St->getSrcValueOffset()+4,
6508                                    St->isVolatile(),
6509                                    MinAlign(St->getAlignment(), 4));
6510      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
6511    }
6512  }
6513  return SDOperand();
6514}
6515
6516/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
6517/// X86ISD::FXOR nodes.
6518static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
6519  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
6520  // F[X]OR(0.0, x) -> x
6521  // F[X]OR(x, 0.0) -> x
6522  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
6523    if (C->getValueAPF().isPosZero())
6524      return N->getOperand(1);
6525  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
6526    if (C->getValueAPF().isPosZero())
6527      return N->getOperand(0);
6528  return SDOperand();
6529}
6530
6531/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
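/// FAND is a plain bitwise AND on FP-typed values (it is used, for instance,
/// when lowering fabs), so AND'ing with the all-zero bit pattern of +0.0
/// always yields +0.0, which is what the folds below rely on.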
6532static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 6533 // FAND(0.0, x) -> 0.0 6534 // FAND(x, 0.0) -> 0.0 6535 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 6536 if (C->getValueAPF().isPosZero()) 6537 return N->getOperand(0); 6538 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 6539 if (C->getValueAPF().isPosZero()) 6540 return N->getOperand(1); 6541 return SDOperand(); 6542} 6543 6544 6545SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 6546 DAGCombinerInfo &DCI) const { 6547 SelectionDAG &DAG = DCI.DAG; 6548 switch (N->getOpcode()) { 6549 default: break; 6550 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget); 6551 case ISD::BUILD_VECTOR: return PerformBuildVectorCombine(N, DAG, Subtarget); 6552 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 6553 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 6554 case X86ISD::FXOR: 6555 case X86ISD::FOR: return PerformFORCombine(N, DAG); 6556 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 6557 } 6558 6559 return SDOperand(); 6560} 6561 6562//===----------------------------------------------------------------------===// 6563// X86 Inline Assembly Support 6564//===----------------------------------------------------------------------===// 6565 6566/// getConstraintType - Given a constraint letter, return the type of 6567/// constraint it is for this target. 6568X86TargetLowering::ConstraintType 6569X86TargetLowering::getConstraintType(const std::string &Constraint) const { 6570 if (Constraint.size() == 1) { 6571 switch (Constraint[0]) { 6572 case 'A': 6573 case 'f': 6574 case 'r': 6575 case 'R': 6576 case 'l': 6577 case 'q': 6578 case 'Q': 6579 case 'x': 6580 case 'y': 6581 case 'Y': 6582 return C_RegisterClass; 6583 default: 6584 break; 6585 } 6586 } 6587 return TargetLowering::getConstraintType(Constraint); 6588} 6589 6590/// LowerXConstraint - try to replace an X constraint, which matches anything, 6591/// with another that has more specific requirements based on the type of the 6592/// corresponding operand. 6593const char *X86TargetLowering:: 6594LowerXConstraint(MVT::ValueType ConstraintVT) const { 6595 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 6596 // 'f' like normal targets. 6597 if (MVT::isFloatingPoint(ConstraintVT)) { 6598 if (Subtarget->hasSSE2()) 6599 return "Y"; 6600 if (Subtarget->hasSSE1()) 6601 return "x"; 6602 } 6603 6604 return TargetLowering::LowerXConstraint(ConstraintVT); 6605} 6606 6607/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 6608/// vector. If it is invalid, don't add anything to Ops. 6609void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, 6610 char Constraint, 6611 std::vector<SDOperand>&Ops, 6612 SelectionDAG &DAG) const { 6613 SDOperand Result(0, 0); 6614 6615 switch (Constraint) { 6616 default: break; 6617 case 'I': 6618 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 6619 if (C->getValue() <= 31) { 6620 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType()); 6621 break; 6622 } 6623 } 6624 return; 6625 case 'N': 6626 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 6627 if (C->getValue() <= 255) { 6628 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType()); 6629 break; 6630 } 6631 } 6632 return; 6633 case 'i': { 6634 // Literal immediates are always ok. 
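    // (e.g. an "i"(42) asm operand is simply turned into the target constant
    // 42 by the code below.)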
6635    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
6636      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
6637      break;
6638    }
6639
6640    // If we are in non-pic codegen mode, we allow the address of a global (with
6641    // an optional displacement) to be used with 'i'.
6642    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
6643    int64_t Offset = 0;
6644
6645    // Match either (GA) or (GA+C)
6646    if (GA) {
6647      Offset = GA->getOffset();
6648    } else if (Op.getOpcode() == ISD::ADD) {
6649      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6650      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
6651      if (C && GA) {
6652        Offset = GA->getOffset()+C->getValue();
6653      } else {  // Also accept the commuted (C + GA) form.
6654        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
6655        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
6656        if (C && GA)
6657          Offset = GA->getOffset()+C->getValue();
6658        else
6659          C = 0, GA = 0;
6660      }
6661    }
6662
6663    if (GA) {
6664      // If addressing this global requires a load (e.g. in PIC mode), we can't
6665      // match.
6666      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
6667                                         false))
6668        return;
6669
6670      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
6671                                      Offset);
6672      Result = Op;
6673      break;
6674    }
6675
6676    // Otherwise, not valid for this mode.
6677    return;
6678  }
6679  }
6680
6681  if (Result.Val) {
6682    Ops.push_back(Result);
6683    return;
6684  }
6685  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
6686}
6687
6688std::vector<unsigned> X86TargetLowering::
6689getRegClassForInlineAsmConstraint(const std::string &Constraint,
6690                                  MVT::ValueType VT) const {
6691  if (Constraint.size() == 1) {
6692    // FIXME: not handling fp-stack yet!
6693    switch (Constraint[0]) {      // GCC X86 Constraint Letters
6694    default: break;  // Unknown constraint letter
6695    case 'A':   // EAX/EDX
6696      if (VT == MVT::i32 || VT == MVT::i64)
6697        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
6698      break;
6699    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
6700    case 'Q':   // Q_REGS
6701      if (VT == MVT::i32)
6702        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
6703      else if (VT == MVT::i16)
6704        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
6705      else if (VT == MVT::i8)
6706        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
6707      else if (VT == MVT::i64)
6708        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
6709      break;
6710    }
6711  }
6712
6713  return std::vector<unsigned>();
6714}
6715
6716std::pair<unsigned, const TargetRegisterClass*>
6717X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
6718                                                MVT::ValueType VT) const {
6719  // First, see if this is a constraint that directly corresponds to an LLVM
6720  // register class.
6721  if (Constraint.size() == 1) {
6722    // GCC Constraint Letters
6723    switch (Constraint[0]) {
6724    default: break;
6725    case 'r':   // GENERAL_REGS
6726    case 'R':   // LEGACY_REGS
6727    case 'l':   // INDEX_REGS
6728      if (VT == MVT::i64 && Subtarget->is64Bit())
6729        return std::make_pair(0U, X86::GR64RegisterClass);
6730      if (VT == MVT::i32)
6731        return std::make_pair(0U, X86::GR32RegisterClass);
6732      else if (VT == MVT::i16)
6733        return std::make_pair(0U, X86::GR16RegisterClass);
6734      else if (VT == MVT::i8)
6735        return std::make_pair(0U, X86::GR8RegisterClass);
6736      break;
6737    case 'f':  // FP Stack registers.
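      // ('f' refers to the x87 floating-point stack, ST(0)..ST(7).)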
6738 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 6739 // value to the correct fpstack register class. 6740 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 6741 return std::make_pair(0U, X86::RFP32RegisterClass); 6742 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 6743 return std::make_pair(0U, X86::RFP64RegisterClass); 6744 return std::make_pair(0U, X86::RFP80RegisterClass); 6745 case 'y': // MMX_REGS if MMX allowed. 6746 if (!Subtarget->hasMMX()) break; 6747 return std::make_pair(0U, X86::VR64RegisterClass); 6748 break; 6749 case 'Y': // SSE_REGS if SSE2 allowed 6750 if (!Subtarget->hasSSE2()) break; 6751 // FALL THROUGH. 6752 case 'x': // SSE_REGS if SSE1 allowed 6753 if (!Subtarget->hasSSE1()) break; 6754 6755 switch (VT) { 6756 default: break; 6757 // Scalar SSE types. 6758 case MVT::f32: 6759 case MVT::i32: 6760 return std::make_pair(0U, X86::FR32RegisterClass); 6761 case MVT::f64: 6762 case MVT::i64: 6763 return std::make_pair(0U, X86::FR64RegisterClass); 6764 // Vector types. 6765 case MVT::v16i8: 6766 case MVT::v8i16: 6767 case MVT::v4i32: 6768 case MVT::v2i64: 6769 case MVT::v4f32: 6770 case MVT::v2f64: 6771 return std::make_pair(0U, X86::VR128RegisterClass); 6772 } 6773 break; 6774 } 6775 } 6776 6777 // Use the default implementation in TargetLowering to convert the register 6778 // constraint into a member of a register class. 6779 std::pair<unsigned, const TargetRegisterClass*> Res; 6780 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6781 6782 // Not found as a standard register? 6783 if (Res.second == 0) { 6784 // GCC calls "st(0)" just plain "st". 6785 if (StringsEqualNoCase("{st}", Constraint)) { 6786 Res.first = X86::ST0; 6787 Res.second = X86::RFP80RegisterClass; 6788 } 6789 6790 return Res; 6791 } 6792 6793 // Otherwise, check to see if this is a register class of the wrong value 6794 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 6795 // turn into {ax},{dx}. 6796 if (Res.second->hasType(VT)) 6797 return Res; // Correct type already, nothing to do. 6798 6799 // All of the single-register GCC register classes map their values onto 6800 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 6801 // really want an 8-bit or 32-bit register, map to the appropriate register 6802 // class and return the appropriate register. 
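  // For example, "{ax}" with an i32 operand comes back as AX in GR16 and is
  // remapped to EAX in GR32 below.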
6803  if (Res.second != X86::GR16RegisterClass)
6804    return Res;
6805
6806  if (VT == MVT::i8) {
6807    unsigned DestReg = 0;
6808    switch (Res.first) {
6809    default: break;
6810    case X86::AX: DestReg = X86::AL; break;
6811    case X86::DX: DestReg = X86::DL; break;
6812    case X86::CX: DestReg = X86::CL; break;
6813    case X86::BX: DestReg = X86::BL; break;
6814    }
6815    if (DestReg) {
6816      Res.first = DestReg;
6817      Res.second = X86::GR8RegisterClass;
6818    }
6819  } else if (VT == MVT::i32) {
6820    unsigned DestReg = 0;
6821    switch (Res.first) {
6822    default: break;
6823    case X86::AX: DestReg = X86::EAX; break;
6824    case X86::DX: DestReg = X86::EDX; break;
6825    case X86::CX: DestReg = X86::ECX; break;
6826    case X86::BX: DestReg = X86::EBX; break;
6827    case X86::SI: DestReg = X86::ESI; break;
6828    case X86::DI: DestReg = X86::EDI; break;
6829    case X86::BP: DestReg = X86::EBP; break;
6830    case X86::SP: DestReg = X86::ESP; break;
6831    }
6832    if (DestReg) {
6833      Res.first = DestReg;
6834      Res.second = X86::GR32RegisterClass;
6835    }
6836  } else if (VT == MVT::i64) {
6837    unsigned DestReg = 0;
6838    switch (Res.first) {
6839    default: break;
6840    case X86::AX: DestReg = X86::RAX; break;
6841    case X86::DX: DestReg = X86::RDX; break;
6842    case X86::CX: DestReg = X86::RCX; break;
6843    case X86::BX: DestReg = X86::RBX; break;
6844    case X86::SI: DestReg = X86::RSI; break;
6845    case X86::DI: DestReg = X86::RDI; break;
6846    case X86::BP: DestReg = X86::RBP; break;
6847    case X86::SP: DestReg = X86::RSP; break;
6848    }
6849    if (DestReg) {
6850      Res.first = DestReg;
6851      Res.second = X86::GR64RegisterClass;
6852    }
6853  }
6854
6855  return Res;
6856}
6857