X86ISelLowering.cpp revision 71f489d72856a0424146a1e24642dfac3ae15522
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

// Forward declarations.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG);

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8,  X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
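  // Marking these Expand makes the legalizer split a truncating store into an
  // explicit TRUNCATE node followed by an ordinary store of the narrower type.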
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
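  // Without SSE2, an f32<->i32 BIT_CONVERT has to be expanded through a stack
  // slot (store in one register class, reload in the other); movd/movq would
  // keep the bits in registers instead.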
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS      , MVT::i64  , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH       , MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER     , MVT::Other, Expand);

  // Expand certain atomics
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i8,  Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LSS       , MVT::i32, Expand);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
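  // (Both nodes carry debug information rather than computation, so the
  // generic expansion is sufficient for them.)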
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
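    // In this mixed mode f32 lives in XMM registers while f64 lives on the
    // x87 stack, so any f32<->f64 conversion has to move the value between
    // the two register files (see the setConvertAction calls below).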
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
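  // (f80 is the 80-bit x87 extended-precision format; SSE has no registers
  // that can hold it.)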
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR,     MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR,     MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR,     MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,     MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,   MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,   MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,   MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,   MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,   MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,   MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,   MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,   MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
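    // Bitwise operations and whole-register loads don't care about the element
    // type of a 128-bit register, so funneling every integer vector type
    // through v2i64 lets one set of selection patterns cover all of them.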
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}


MVT::ValueType
X86TargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
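/// It recurses through arrays and structs, and stops early once a 128-bit
/// vector has forced the alignment up to the 16-byte maximum.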
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
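  // If this RET follows a tail call that could not be folded away, rewrite it
  // into an X86ISD::TC_RETURN carrying the target address, the stack
  // adjustment, and the registers used by the call, so that a jump is emitted
  // instead of a call/ret pair.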
  Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand,8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  SmallVector<SDOperand, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDOperand ValToCopy = Op.getOperand(i*2+1);

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (RVLocs[i].getLocReg() == X86::ST0 ||
        RVLocs[i].getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax.
  if (Subtarget->is64Bit() &&
      DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
      FuncInfo->setSRetReturnReg(Reg);
    }
    SDOperand Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy());

    Chain = DAG.getCopyToReg(Chain, X86::RAX, Val, Flag);
    Flag = Chain.getValue(1);
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.Val)
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size());
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    MVT::ValueType CopyVT = RVLocs[i].getValVT();

    // If this is a call to a function that returns an fp value on the floating
    // point stack, but where we prefer to use the value in xmm registers, copy
    // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if (RVLocs[i].getLocReg() == X86::ST0 &&
        isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
      CopyVT = MVT::f80;
    }

    Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDOperand Val = Chain.getValue(0);
    InFlag = Chain.getValue(2);

    if (CopyVT != RVLocs[i].getValVT()) {
      // Round the F80 value to the right size, which also moves it to the
      // appropriate xmm register.
      Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));
    }

    ResultVals.push_back(Val);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is standard for many Windows API routines.
//  It differs from the C calling convention only slightly: the callee cleans
//  up the stack instead of the caller, and symbols are decorated in some
//  fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
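/// A CALL node's operands are (chain, cc, isVarArg, isTailCall, callee)
/// followed by one (value, flags) pair per argument, so the flags of the
/// first argument are at operand index 6.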
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(6))->getArgFlags().isSRet();
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
/// return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else {
      if (CC == CallingConv::Fast && PerformTailCallOpt)
        return CC_X86_64_TailCall;
      else
        return CC_X86_64_C;
    }
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}


/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" with size and alignment information specified by
/// the specific parameter attribute. The copy will be passed as a byval
/// function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
  SDOperand SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*AlwaysInline=*/true, NULL, 0, NULL, 0);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
  bool AlwaysUseMutable = (CC==CallingConv::Fast) && PerformTailCallOpt;
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis.
  // In the case of tail call optimization, mark all arguments mutable, since
  // they could be overwritten by the lowering of the arguments of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (Flags.isByVal())
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isTargetWin64();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 128)
        RC = X86::VR128RegisterClass;
      else if (MVT::isVector(RegVT)) {
        assert(MVT::getSizeInBits(RegVT) == 64);
        if (!Is64Bit)
          RC = X86::VR64RegisterClass;     // MMX values are passed in MMXs.
        else {
          // Darwin calling convention passes MMX values in either GPRs or
          // XMMs in x86-64. Other targets pass them in memory.
          if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
            RC = X86::VR128RegisterClass;  // MMX values are passed in XMMs.
            RegVT = MVT::v2i64;
          } else {
            RC = X86::GR64RegisterClass;   // v1i64 values are passed in GPRs.
            RegVT = MVT::i64;
          }
        }
      } else {
        assert(0 && "Unknown argument type!");
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT()) {
        if (MVT::getSizeInBits(RegVT) == 64 && RC == X86::GR64RegisterClass)
          ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
        else if (RC == X86::VR128RegisterClass) {
          ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue,
                                 DAG.getConstant(0, MVT::i64));
          ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
        }
      }

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. Save the argument into
  // a virtual register so that we can access it from the return points.
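  // (LowerRET performs the matching copy out of this virtual register and
  // into %rax.)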
  if (Is64Bit && DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
      FuncInfo->setSRetReturnReg(Reg);
    }
    SDOperand Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]);
    Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root);
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // align stack specially for tail calls
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;

      // FIXME: We should really autogenerate these arrays
      static const unsigned GPR64ArgRegsWin64[] = {
        X86::RCX, X86::RDX, X86::R8,  X86::R9
      };
      static const unsigned XMMArgRegsWin64[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
      };
      static const unsigned GPR64ArgRegs64Bit[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs64Bit[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };
      const unsigned *GPR64ArgRegs, *XMMArgRegs;

      if (IsWin64) {
        TotalNumIntRegs = 4; TotalNumXMMRegs = 4;
        GPR64ArgRegs = GPR64ArgRegsWin64;
        XMMArgRegs = XMMArgRegsWin64;
      } else {
        TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
        GPR64ArgRegs = GPR64ArgRegs64Bit;
        XMMArgRegs = XMMArgRegs64Bit;
      }
      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
                                                       TotalNumIntRegs);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
                                                       TotalNumXMMRegs);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = TotalNumIntRegs * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(TotalNumIntRegs * 8 +
                                                 TotalNumXMMRegs * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
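      // Each XMM register occupies a 16-byte slot in the register save area,
      // laid out after the TotalNumIntRegs * 8 bytes of integer registers
      // (cf. the VarArgsFPOffset computation above).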
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn  = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;   // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  ISD::ArgFlagsTy Flags =
    cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->getArgFlags();
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

/// EmitTailCallLoadRetAddr - Emit a load of the return address if tail call
/// optimization is performed and it is required.
SDOperand
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                           SDOperand &OutRetAddr,
                                           SDOperand Chain,
                                           bool IsTailCall,
                                           bool Is64Bit,
                                           int FPDiff) {
  if (!IsTailCall || FPDiff==0) return Chain;

  // Adjust the Return address stack slot.
  MVT::ValueType VT = getPointerTy();
  OutRetAddr = getReturnAddressFrameIndex(DAG);
  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, Chain, OutRetAddr, NULL, 0);
  return SDOperand(OutRetAddr.Val, 1);
}

/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
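/// FPDiff is the difference between the caller's and the callee's argument
/// area sizes; when it is nonzero the return address must be moved to a new
/// slot so the tail-called function finds it at the offset it expects.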
static SDOperand
EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                         SDOperand Chain, SDOperand RetAddrFrIdx,
                         bool Is64Bit, int FPDiff) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int SlotSize = Is64Bit ? 8 : 4;
  int NewReturnAddrFI =
    MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
  MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
  Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
                       PseudoSourceValue::getFixedStack(), NewReturnAddrFI);
  return Chain;
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDOperand Chain = Op.getOperand(0);
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
                    && CC == CallingConv::Fast && PerformTailCallOpt;
  SDOperand Callee = Op.getOperand(4);
  bool Is64Bit = Subtarget->is64Bit();
  bool IsStructRet = CallIsStructReturn(Op);

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CCAssignFnForNode(Op));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  // Make sure the argument area occupies 8n+4 bytes so that the start of the
  // arguments stays aligned after the 4-byte return address has been pushed.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (NumBytes & 7) == 0)
    NumBytes += 4;

  int FPDiff = 0;
  if (IsTailCall) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed =
      MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
    FPDiff = NumBytesCallerPushed - NumBytes;

    // Record the delta by which the return address stack slot moves, but only
    // if this delta is smaller (more negative) than any previously recorded one.
    if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
      MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));

  SDOperand RetAddrFrIdx;
  // Load the return address for tail calls.
  Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit,
                                  FPDiff);

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
    bool isByVal = cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->
      getArgFlags().isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (Is64Bit) {
        MVT::ValueType RegVT = VA.getLocVT();
        if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64)
          switch (VA.getLocReg()) {
          default:
            break;
          case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
          case X86::R8: {
            // Special case: passing MMX values in GPR registers.
            Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
            break;
          }
          case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
          case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
            // Special case: passing MMX values in XMM registers.
            Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
            Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg);
            Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
                              DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg,
                              getMOVLMask(2, DAG));
            break;
          }
          }
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      if (!IsTailCall || (IsTailCall && isByVal)) {
        assert(VA.isMemLoc());
        if (StackPtr.Val == 0)
          StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());

        MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                               Arg));
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  // Tail call byval lowering might overwrite argument registers, so in the
  // case of tail call optimization the copies to registers are lowered later.
  if (!IsTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                               InFlag);
      InFlag = Chain.getValue(1);
    }

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls via PLT.
  if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  // If we are tail calling and generating PIC/GOT style code, load the address
  // of the callee into ecx. The value in ecx is used as the target of the tail
  // jump. This is done to circumvent the ebx/callee-saved problem for tail
  // calls on PIC/GOT architectures. Normally we would just put the address of
  // GOT into ebx and then call target@PLT. But for tail calls ebx would be
  // restored (since ebx is callee saved) before jumping to the target@PLT.
  if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
    // Note: The actual moving to ecx is done further down.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (G && !G->getGlobal()->hasHiddenVisibility() &&
        !G->getGlobal()->hasProtectedVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);
  }

  if (Is64Bit && isVarArg) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    // FIXME: Verify this on Win64
    // Count the number of XMM registers allocated.
    static const unsigned XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }


  // For tail calls, lower the arguments to the 'real' stack slot.
  if (IsTailCall) {
    SmallVector<SDOperand, 8> MemOpChains2;
    SDOperand FIN;
    int FI = 0;
    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDOperand();
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (!VA.isRegLoc()) {
        assert(VA.isMemLoc());
        SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
        SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
        ISD::ArgFlagsTy Flags =
          cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags();
        // Create the frame index.
        int32_t Offset = VA.getLocMemOffset()+FPDiff;
        uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
        FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
        FIN = DAG.getFrameIndex(FI, getPointerTy());

        if (Flags.isByVal()) {
          // Copy relative to framepointer.
          SDOperand Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
          if (StackPtr.Val == 0)
            StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
          Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source);

          MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
                                                           Flags, DAG));
        } else {
          // Store relative to framepointer.
          MemOpChains2.push_back(
            DAG.getStore(Chain, Arg, FIN,
                         PseudoSourceValue::getFixedStack(), FI));
        }
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                          &MemOpChains2[0], MemOpChains2.size());

    // Copy arguments to their registers.
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                               InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDOperand();

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
                                     FPDiff);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if ((IsTailCall || !Is64Bit ||
         getTargetMachine().getCodeModel() != CodeModel::Large)
        && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                           getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (IsTailCall || !Is64Bit ||
        getTargetMachine().getCodeModel() != CodeModel::Large)
      Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  } else if (IsTailCall) {
    unsigned Opc = Is64Bit ? X86::R9 : X86::ECX;

    Chain = DAG.getCopyToReg(Chain,
                             DAG.getRegister(Opc, getPointerTy()),
                             Callee, InFlag);
    Callee = DAG.getRegister(Opc, getPointerTy());
    // Add the register as a live out.
    DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;

  if (IsTailCall) {
    Ops.push_back(Chain);
    Ops.push_back(DAG.getIntPtrConstant(NumBytes));
    Ops.push_back(DAG.getIntPtrConstant(0));
    if (InFlag.Val)
      Ops.push_back(InFlag);
    Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
    InFlag = Chain.getValue(1);

    // Returns a chain & a flag for retval copy to use.
    NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (!IsTailCall && !Is64Bit &&
      getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  // Add an implicit use of AL for x86 vararg functions.
  if (Is64Bit && isVarArg)
    Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  if (IsTailCall) {
    assert(InFlag.Val &&
           "Flag must be set. Depend on flag being set in LowerRET");
    Chain = DAG.getNode(X86ISD::TAILCALL,
                        Op.Val->getVTList(), &Ops[0], Ops.size());

    return SDOperand(Chain.Val, Op.ResNo);
  }

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush;
  if (IsCalleePop(Op))
    NumBytesForCalleeToPush = NumBytes;  // Callee pops everything
  else if (!Is64Bit && IsStructRet)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(NumBytes),
                             DAG.getIntPtrConstant(NumBytesForCalleeToPush),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like the stdcall convention, the callee cleans up the arguments, except
// that ECX is reserved for storing the address of the tail called function.
// Only 2 registers are free for argument passing (inreg). Tail call
// optimization is performed provided:
//  * tailcallopt is enabled
//  * caller/callee are fastcc
// On the X86_64 architecture with GOT-style position independent code, only
// local (within module) calls are supported at the moment.
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld,
// for example.) If a tail called function callee has more arguments than the
// caller, the caller needs to make sure that there is room to move the RETADDR
// to. This is achieved by reserving an area the size of the argument delta
// right after the original RETADDR, but before the saved framepointer or the
// spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
// stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Align the stack size to e.g. 16n + 12 bytes,
/// so that it meets a 16-byte alignment requirement once the 4-byte return
/// address has been pushed.
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // The misalignment fits below the boundary; just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the low bits, then add the stack alignment once plus
      // StackAlignment - SlotSize (e.g. 12) bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment - SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A call is eligible if the
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the CALL is immediately followed by a RET.
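/// A sketch of an eligible call site (both caller and callee fastcc):
///   %res = tail call fastcc i32 @callee( i32 %x )
///   ret i32 %res
/// A call whose result is used before the ret is not eligible.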
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  if (CheckTailCallReturnConstraints(Call, Ret)) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // Unless this is x86-64 GOT-style PIC, tail calls are always supported.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
        return true;

      // Can only do local tail calls (in same module, hidden or protected) on
      // x86_64 PIC/GOT at the moment.
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        return G->getGlobal()->hasHiddenVisibility()
            || G->getGlobal()->hasProtectedVisibility();
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//


SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}



/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
/// specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the range [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value equals the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
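/// For example, <2, 1, 0, 3> is a valid PSHUFD mask, while <0, 1, 4, 5> is
/// not, because it references elements of the second vector.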
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
    return false;

  // Check that the mask doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
      return false;
  }

  return true;
}

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
      return false;

  return true;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(SDOperandPtr Elems, unsigned NumElems) {
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Elems[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the destination)
/// and the upper half to come from vector 2.
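/// For example, for 4 elements <4, 5, 0, 1> is commuted, while <0, 1, 4, 5>
/// is the form isSHUFPMask accepts.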
static bool isCommutedSHUFP(SDOperandPtr Ops, unsigned NumOps) {
  if (NumOps != 2 && NumOps != 4) return false;

  unsigned Half = NumOps / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
      return false;
  for (unsigned i = Half; i < NumOps; ++i)
    if (!isUndefOrInRange(Ops[i], 0, NumOps))
      return false;
  return true;
}

static bool isCommutedSHUFP(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3.
  return isUndefOrEqual(N->getOperand(0), 6) &&
         isUndefOrEqual(N->getOperand(1), 7) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect element 0 == 2, element 1 == 3, element 2 == 2, element 3 == 3.
  return isUndefOrEqual(N->getOperand(0), 2) &&
         isUndefOrEqual(N->getOperand(1), 3) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  return true;
}

/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i) {
    SDOperand Arg = N->getOperand(i + NumElems/2);
    if (!isUndefOrEqual(Arg, i + NumElems))
      return false;
  }

  return true;
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
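/// e.g. the canonical 4-element UNPCKL mask is <0, 4, 1, 5>.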
static bool isUNPCKLMask(SDOperandPtr Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(SDOperandPtr Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j + NumElts/2))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i + 1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
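/// e.g. <4, 1, 2, 3> for a 4-element shuffle: the low element comes from V2
/// and the remaining elements are passed through from V1.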
static bool isMOVLMask(SDOperandPtr Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
/// x86 movss wants: x86 movss requires the lowest element to be the lowest
/// element of vector 2 and the other elements to come from vector 1 in order.
static bool isCommutedMOVL(SDOperandPtr Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
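/// i.e. the mask <0, 0, 2, 2>, with undefs allowed in any position.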
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and the mask has 2 or 4 elements.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
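  // Encoding sketch (derived from the loop below): each mask element is
  // packed into the immediate Shift bits at a time, with the highest-index
  // operand landing in the top bits. For a hypothetical 4-element mask
  // <3, 1, 0, 2>, the immediate is 0b10000111 = 0x87.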
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
/// instruction.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
/// instruction.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap the vector_shuffle operands as well as the
/// values in their permute mask.
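/// e.g. commuting vector_shuffle V1, V2, <0, 5, 2, 7> yields
/// vector_shuffle V2, V1, <4, 1, 6, 3>.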
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// CommuteVectorShuffleMask - Change the values in a shuffle permute mask
/// assuming the two vector operands have swapped position.
static
SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
}


/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
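/// e.g. for 4 elements the expected mask is <0, 1, 6, 7>.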
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // a load-folding shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.Val->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}


/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  SmallVector<SDOperand, 8> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}

/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);

  SmallVector<SDOperand, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
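/// e.g. for width 4 this produces the mask <0, 4, 1, 5>.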
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  unsigned Half = NumElems/2;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps
/// element #0 of a vector with the specified index, leaving the rest of the
/// elements in place.
static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
                                    SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  // Element #0 of the result gets the elt we are replacing.
  MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) {
  MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
  MVT::ValueType VT = Op.getValueType();
  if (PVT == VT)
    return Op;
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  // For non-v4f32 splats, first reduce the vector to 4 elements; the zero
  // mask built below then splats element 0.
  if (VT != MVT::v4f32) {
    Mask = getUnpacklMask(NumElems, DAG);
    while (NumElems > 4) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
      NumElems >>= 1;
    }
    Mask = getZeroVector(MVT::v4i32, DAG);
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1,
                                  DAG.getNode(ISD::UNDEF, PVT), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector with a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  MVT::ValueType VT = V2.getValueType();
  SDOperand V1 = isZero ?
    getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  unsigned NumElems = MVT::getVectorNumElements(V2.getValueType());
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
    else
      MaskVec.push_back(DAG.getConstant(i, EVT));
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getIntPtrConstant(i/2));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getIntPtrConstant(i));
    }
  }

  return V;
}

SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor; all ones are handled with pcmpeqd.
  if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) {
    // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
    // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
    // eliminated on x86-32 hosts.
    if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
      return Op;

    if (ISD::isBuildVectorAllOnes(Op.Val))
      return getOnesVector(Op.getValueType(), DAG);
    return getZeroVector(Op.getValueType(), DAG);
  }

  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  unsigned EVTBits = MVT::getSizeInBits(EVT);

  unsigned NumElems = Op.getNumOperands();
  unsigned NumZero  = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDOperand, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
    if (Elt.getOpcode() != ISD::Constant &&
        Elt.getOpcode() != ISD::ConstantFP)
      IsAllConstants = false;
    if (isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }

  if (NumNonZero == 0) {
    // All undef vector. Return an UNDEF.  All zero vectors were handled above.
    return DAG.getNode(ISD::UNDEF, VT);
  }

  // Special case for single non-zero, non-undef, element.
  if (NumNonZero == 1 && NumElems <= 4) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDOperand Item = Op.getOperand(Idx);

    // If this is an insertion of an i64 value on x86-32, and if the top bits of
    // the value are obviously zero, truncate the value to i32 and do the
    // insertion that way.  Only do this if the value is non-constant or if the
    // value is a constant being inserted into element 0.  It is cheaper to do
    // a constant pool load than it is to do a movd + shuffle.
    if (EVT == MVT::i64 && !Subtarget->is64Bit() &&
        (!IsAllConstants || Idx == 0)) {
      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
        // Handle MMX and SSE both.
        MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
        unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;

        // Truncate the value (which may itself be a constant) to i32, and
        // convert it to a vector with movd (S2V+shuffle to zero extend).
        Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item);
        Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG);

        // Now we have our 32-bit value zero extended in the low element of
        // a vector.  If Idx != 0, swizzle it into place.
        if (Idx != 0) {
          SDOperand Ops[] = {
            Item, DAG.getNode(ISD::UNDEF, Item.getValueType()),
            getSwapEltZeroMask(VecElts, Idx, DAG)
          };
          Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3);
        }
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item);
      }
    }

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements.  This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.  Because we can only get here
    // when NumElems <= 4, this only needs to handle i32/f32/i64/f64.
    if (Idx == 0 &&
        // Don't do this for i64 values on x86-32.
        (EVT != MVT::i64 || Subtarget->is64Bit())) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
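      // For a 4-element vector this builds the shuffle mask <4,1,2,3>: the
      // low element comes from the scalar and the rest come from the zero
      // (or undef) vector.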
      return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDOperand();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);

      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
      MVT::ValueType MaskVT  = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
                         DAG.getNode(ISD::UNDEF, VT), Mask);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1)
    return SDOperand();

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDOperand();

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64)
    return SDOperand();

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDOperand, 8> V;
  V.resize(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                             getMOVLMask(NumElems, DAG));
          break;
        case 2:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getMOVLMask(NumElems, DAG));
          break;
        case 3:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getUnpacklMask(NumElems, DAG));
          break;
      }
    }

    // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
    // clears the upper bits.
    // FIXME: we can do the same for v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in post legalizer dag combiner with target specific hooks.
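    // If the two high elements are zero, V[0] is already the whole result:
    // it is an unpckl of two movd'd scalars, and movd cleared the upper bits.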
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
    SmallVector<SDOperand, 8> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i, EVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size());
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }

  if (Values.size() > 2) {
    // Expand into a number of unpckl*.
    // e.g. for v4f32
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}

static
SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
                                   SDOperand PermMask, SelectionDAG &DAG,
                                   TargetLowering &TLI) {
  SDOperand NewV;
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
  MVT::ValueType PtrVT = TLI.getPointerTy();
  SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
                                     PermMask.Val->op_end());

  // First record which half of which vector the low elements come from.
  SmallVector<unsigned, 4> LowQuad(4);
  for (unsigned i = 0; i < 4; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    int QuadIdx = EltIdx / 4;
    ++LowQuad[QuadIdx];
  }
  int BestLowQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LowQuad[i] > MaxQuad) {
      BestLowQuad = i;
      MaxQuad = LowQuad[i];
    }
  }

  // Record which half of which vector the high elements come from.
  SmallVector<unsigned, 4> HighQuad(4);
  for (unsigned i = 4; i < 8; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    int QuadIdx = EltIdx / 4;
    ++HighQuad[QuadIdx];
  }
  int BestHighQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HighQuad[i] > MaxQuad) {
      BestHighQuad = i;
      MaxQuad = HighQuad[i];
    }
  }

  // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it.
  if (BestLowQuad != -1 || BestHighQuad != -1) {
    // First sort the 4 chunks in order using shufpd.
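    // The two i32 mask elements below select 64-bit quadrants: element 0
    // picks the quad supplying the low half and element 1 the quad supplying
    // the high half (quads 0-1 live in V1, quads 2-3 in V2).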
    SmallVector<SDOperand, 8> MaskVec;
    if (BestLowQuad != -1)
      MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(0, MVT::i32));
    if (BestHighQuad != -1)
      MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(1, MVT::i32));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0], 2);
    NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1),
                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask);
    NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV);

    // Now sort high and low parts separately.
    BitVector InOrder(8);
    if (BestLowQuad != -1) {
      // Sort lower half in order using PSHUFLW.
      MaskVec.clear();
      bool AnyOutOrder = false;
      for (unsigned i = 0; i != 4; ++i) {
        SDOperand Elt = MaskElts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(Elt);
          InOrder.set(i);
        } else {
          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
          if (EltIdx != i)
            AnyOutOrder = true;
          MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT));
          // If this element is in the right place after this shuffle, then
          // remember it.
          if ((int)(EltIdx / 4) == BestLowQuad)
            InOrder.set(i);
        }
      }
      if (AnyOutOrder) {
        for (unsigned i = 4; i != 8; ++i)
          MaskVec.push_back(DAG.getConstant(i, MaskEVT));
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
      }
    }

    if (BestHighQuad != -1) {
      // Sort high half in order using PSHUFHW if possible.
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
      bool AnyOutOrder = false;
      for (unsigned i = 4; i != 8; ++i) {
        SDOperand Elt = MaskElts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(Elt);
          InOrder.set(i);
        } else {
          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
          if (EltIdx != i)
            AnyOutOrder = true;
          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
          // If this element is in the right place after this shuffle, then
          // remember it.
          if ((int)(EltIdx / 4) == BestHighQuad)
            InOrder.set(i);
        }
      }
      if (AnyOutOrder) {
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
      }
    }

    // The other elements are put in the right place using pextrw and pinsrw.
    for (unsigned i = 0; i != 8; ++i) {
      if (InOrder[i])
        continue;
      SDOperand Elt = MaskElts[i];
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx == i)
        continue;
      SDOperand ExtOp = (EltIdx < 8)
        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                      DAG.getConstant(EltIdx, PtrVT))
        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                      DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }

  // PSHUF{H|L}W are not used.  Lower into extracts and inserts but try to use
  // as few as possible.
  // First, let's find out how many elements are already in the right order.
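  // V1InOrder/V2InOrder count elements already in place relative to V1/V2;
  // V1FromV1/V2FromV2 count elements that come from the right vector but sit
  // in the wrong slot.  These counts decide which vector to use as the base.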
  unsigned V1InOrder = 0;
  unsigned V1FromV1 = 0;
  unsigned V2InOrder = 0;
  unsigned V2FromV2 = 0;
  SmallVector<SDOperand, 8> V1Elts;
  SmallVector<SDOperand, 8> V2Elts;
  for (unsigned i = 0; i < 8; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(Elt);
      ++V1InOrder;
      ++V2InOrder;
      continue;
    }
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    if (EltIdx == i) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
      ++V1InOrder;
    } else if (EltIdx == i+8) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i, MaskEVT));
      ++V2InOrder;
    } else if (EltIdx < 8) {
      V1Elts.push_back(Elt);
      ++V1FromV1;
    } else {
      V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
      ++V2FromV2;
    }
  }

  if (V2InOrder > V1InOrder) {
    PermMask = CommuteVectorShuffleMask(PermMask, DAG);
    std::swap(V1, V2);
    std::swap(V1Elts, V2Elts);
    std::swap(V1FromV1, V2FromV2);
  }

  if ((V1FromV1 + V1InOrder) != 8) {
    // Some elements are from V2.
    if (V1FromV1) {
      // If there are elements that are from V1 but out of place,
      // then first sort them in place.
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < 8; ++i) {
        SDOperand Elt = V1Elts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
          continue;
        }
        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
        if (EltIdx >= 8)
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
        else
          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
      }
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
    }

    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx < 8)
        continue;
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                                    DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  } else {
    // All elements are from V1.
    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                                    DAG.getConstant(EltIdx, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements points to elements in
/// the right sequence. e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
static
SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
                                   MVT::ValueType VT,
                                   SDOperand PermMask, SelectionDAG &DAG,
                                   TargetLowering &TLI) {
  unsigned NumElems = PermMask.getNumOperands();
  unsigned NewWidth = (NumElems == 4) ? 2 : 4;
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
  MVT::ValueType NewVT = MaskVT;
  switch (VT) {
  case MVT::v4f32: NewVT = MVT::v2f64; break;
  case MVT::v4i32: NewVT = MVT::v2i64; break;
  case MVT::v8i16: NewVT = MVT::v4i32; break;
  case MVT::v16i8: NewVT = MVT::v4i32; break;
  default: assert(false && "Unexpected!");
  }

  if (NewWidth == 2) {
    if (MVT::isInteger(VT))
      NewVT = MVT::v2i64;
    else
      NewVT = MVT::v2f64;
  }
  unsigned Scale = NumElems / NewWidth;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i < NumElems; i += Scale) {
    unsigned StartIdx = ~0U;
    for (unsigned j = 0; j < Scale; ++j) {
      SDOperand Elt = PermMask.getOperand(i+j);
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (StartIdx == ~0U)
        StartIdx = EltIdx - (EltIdx % Scale);
      if (EltIdx != StartIdx + j)
        return SDOperand();
    }
    if (StartIdx == ~0U)
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32));
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1);
  V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &MaskVec[0], MaskVec.size()));
}

SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool isMMX = MVT::getSizeInBits(VT) == 64;
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;

  if (isUndefShuffle(Op.Val))
    return DAG.getNode(ISD::UNDEF, VT);

  if (isZeroShuffle(Op.Val))
    return getZeroVector(VT, DAG);

  if (isIdentityMask(PermMask.Val))
    return V1;
  else if (isIdentityMask(PermMask.Val, true))
    return V2;

  if (isSplatMask(PermMask.Val)) {
    if (isMMX || NumElems < 4) return Op;
    // Promote it to a v4{if}32 splat.
    return PromoteSplat(Op, DAG, Subtarget->hasSSE2());
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
    if (NewOp.Val)
      return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
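    // (movq xmm, xmm copies the low 64 bits and zeroes the upper 64 bits,
    // which is exactly what the narrowed shuffle looks like when one operand
    // is an all-zeros vector.)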
    if (ISD::isBuildVectorAllZeros(V2.Val)) {
      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
      if (NewOp.Val) {
        SDOperand NewV1 = NewOp.getOperand(0);
        SDOperand NewV2 = NewOp.getOperand(1);
        SDOperand NewMask = NewOp.getOperand(2);
        if (isCommutedMOVL(NewMask.Val, true, false)) {
          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
                              NewV1, NewV2, getMOVLMask(2, DAG));
          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
        }
      }
    } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
    }
  }

  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat?  Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  // FIXME: Figure out a cleaner way to do this.
  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again.  If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // Try PSHUF* first, then SHUFP*.
  // MMX doesn't have PSHUFD but it does have PSHUFW. While it's theoretically
  // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
  if (isMMX && NumElems == 4 && X86::isPSHUFDMask(PermMask.Val)) {
    if (V2.getOpcode() != ISD::UNDEF)
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                         DAG.getNode(ISD::UNDEF, VT), PermMask);
    return Op;
  }

  if (!isMMX) {
    if (Subtarget->hasSSE2() &&
        (X86::isPSHUFDMask(PermMask.Val) ||
         X86::isPSHUFHWMask(PermMask.Val) ||
         X86::isPSHUFLWMask(PermMask.Val))) {
      MVT::ValueType RVT = VT;
      if (VT == MVT::v4f32) {
        RVT = MVT::v4i32;
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT,
                         DAG.getNode(ISD::BIT_CONVERT, RVT, V1),
                         DAG.getNode(ISD::UNDEF, RVT), PermMask);
      } else if (V2.getOpcode() != ISD::UNDEF)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, V1,
                         DAG.getNode(ISD::UNDEF, RVT), PermMask);
      if (RVT != VT)
        Op = DAG.getNode(ISD::BIT_CONVERT, VT, Op);
      return Op;
    }

    // Binary or unary shufps.
    if (X86::isSHUFPMask(PermMask.Val) ||
        (V2.getOpcode() == ISD::UNDEF && X86::isPSHUFDMask(PermMask.Val)))
      return Op;
  }

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
    if (NewOp.Val)
      return NewOp;
  }

  // Handle all 4 wide cases with a number of shuffles.
  if (NumElems == 4 && !isMMX) {
    // Don't do this for MMX.
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
    SmallVector<std::pair<int, int>, 8> Locs;
    Locs.resize(NumElems);
    SmallVector<SDOperand, 8> Mask1(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand, 8> Mask2(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles.  The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its vector
    // operands, puts the elements into the right order.
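    // For example, mask <0,4,1,5> gathers with Mask1 = <0,1,4,5>, producing
    // <V1[0],V1[1],V2[0],V2[1]>; Mask2 = <0,2,5,7> on that single vector then
    // places the elements as <V1[0],V2[0],V1[1],V2[1]>.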
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &Mask1[0], Mask1.size()));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &Mask2[0], Mask2.size()));
    }

    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    Locs.resize(NumElems);
    SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand,8> *MaskPtr = &LoMask;
    unsigned MaskIdx = 0;
    unsigned LoIdx = 0;
    unsigned HiIdx = NumElems/2;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (i == NumElems/2) {
        MaskPtr = &HiMask;
        MaskIdx = 1;
        LoIdx = 0;
        HiIdx = NumElems/2;
      }
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
        Locs[i] = std::make_pair(MaskIdx, LoIdx);
        (*MaskPtr)[LoIdx] = Elt;
        LoIdx++;
      } else {
        Locs[i] = std::make_pair(MaskIdx, HiIdx);
        (*MaskPtr)[HiIdx] = Elt;
        HiIdx++;
      }
    }

    SDOperand LoShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &LoMask[0], LoMask.size()));
    SDOperand HiShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &HiMask[0], HiMask.size()));
    SmallVector<SDOperand, 8> MaskOps;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Locs[i].first == -1) {
        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
      } else {
        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
      }
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskOps[0], MaskOps.size()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op,
                                                SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  if (MVT::getSizeInBits(VT) == 8) {
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 16) {
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32.
    if (!Op.hasOneUse())
      return SDOperand();
    SDNode *User = Op.Val->use_begin()->getUser();
    if (User->getOpcode() != ISD::STORE &&
        (User->getOpcode() != ISD::BIT_CONVERT ||
         User->getValueType(0) != MVT::i32))
      return SDOperand();
    SDOperand Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
                                    DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
                                                Op.getOperand(0)),
                                    Op.getOperand(1));
    return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Extract);
  }
  return SDOperand();
}


SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  if (Subtarget->hasSSE41()) {
    SDOperand Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.Val)
      return Res;
  }

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
                                                 Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;
    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
                                 MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
                                 MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
                                 MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getIntPtrConstant(0));
  } else if (MVT::getSizeInBits(VT) == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
                                 MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
                                                  : X86ISD::PINSRW;
    // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
    // argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(Opc, VT, N0, N1, N2);
  } else if (EVT == MVT::f32) {
    // Bits [7:6] of the constant are the source select.  This will always be
    //  zero here.  The DAG Combiner may combine an extract_elt index into
    //  these bits.  For example (insert (extract, 3), 2) could be matched by
    //  putting the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select.  This is the
    //  value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask.  The DAG Combiner may
    //  combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4);
    return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
  }
  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EVT == MVT::i8)
    return SDOperand();

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if (MVT::getSizeInBits(EVT) == 16) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
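    // (pinsrw only reads the low 16 bits of the GR32 source, so any-extending
    // the value is sufficient.)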
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  }
  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  MVT::ValueType VT = MVT::v2i32;
  switch (Op.getValueType()) {
  default: break;
  case MVT::v16i8:
  case MVT::v8i16:
    VT = MVT::v4i32;
    break;
  }
  return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt));
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return Result;
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not the GV offset field.
  // The platform check is inside the GVRequiresExtraLoad() call.
  // The same applies for external symbols during PIC codegen.
  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
                         PseudoSourceValue::getGOT(), 0);

  return Result;
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDOperand
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const MVT::ValueType PtrVT) {
  SDOperand InFlag;
  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
                                     DAG.getNode(X86ISD::GlobalBaseReg,
                                                 PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  // emit leal symbol@TLSGD(,%ebx,1), %eax
  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Ops[] = { Chain, TGA, InFlag };
  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
  InFlag = Result.getValue(2);
  Chain = Result.getValue(1);

  // call ___tls_get_addr. This function receives its argument in
  // the register EAX.
  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
  InFlag = Chain.getValue(1);

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops1[] = { Chain,
                       DAG.getTargetExternalSymbol("___tls_get_addr",
                                                   PtrVT),
                       DAG.getRegister(X86::EAX, PtrVT),
                       DAG.getRegister(X86::EBX, PtrVT),
                       InFlag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
  InFlag = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDOperand
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const MVT::ValueType PtrVT) {
  SDOperand InFlag, Chain;

  // emit leaq symbol@TLSGD(%rip), %rdi
  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Ops[]  = { DAG.getEntryNode(), TGA };
  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 2);
  Chain = Result.getValue(1);
  InFlag = Result.getValue(2);

  // call ___tls_get_addr. This function receives its argument in
  // the register RDI.
  Chain = DAG.getCopyToReg(Chain, X86::RDI, Result, InFlag);
  InFlag = Chain.getValue(1);

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops1[] = { Chain,
                       DAG.getTargetExternalSymbol("___tls_get_addr",
                                                   PtrVT),
                       DAG.getRegister(X86::RDI, PtrVT),
                       InFlag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 4);
  InFlag = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, X86::RAX, PtrVT, InFlag);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
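// In both cases the address is the thread pointer plus an offset; "initial
// exec" loads the offset from the GOT, while "local exec" resolves it at
// link time.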
static SDOperand
LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                    const MVT::ValueType PtrVT) {
  // Get the Thread Pointer
  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax"
  // (initial exec)
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);

  if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset,
                         PseudoSourceValue::getGOT(), 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}

SDOperand
X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for pic executables
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model.
  if (Subtarget->is64Bit()) {
    return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
  } else {
    if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
      return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
    else
      return LowerToTLSExecModel(GA, DAG, getPointerTy());
  }
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT::ValueType VT = Op.getValueType();
  unsigned VTBits = MVT::getSizeInBits(VT);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt  = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) :
    DAG.getConstant(0, VT);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt);
  }

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                  DAG.getConstant(VTBits, MVT::i8));
  SDOperand Cond = DAG.getNode(X86ISD::CMP, VT,
                               AndNode, DAG.getConstant(0, MVT::i8));

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  VTs = DAG.getNodeValueTypes(VT, MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size());
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size());
  }

  VTs = DAG.getNodeValueTypes(VT, VT);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; caller falls through into that case.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return SDOperand();
  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
      Subtarget->is64Bit())
    return SDOperand();

  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
                                 StackSlot,
                                 PseudoSourceValue::getFixedStack(),
                                 SSFI);

  // Build the FILD
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                                 Tys, &Ops[0], Ops.size());

  if (useSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When the stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SmallVector<SDOperand, 8> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
                         PseudoSourceValue::getFixedStack(), SSFI);
  }

  return Result;
}

std::pair<SDOperand,SDOperand> X86TargetLowering::
FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");

  // These are really Legal.
  if (Op.getValueType() == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDOperand(), SDOperand());
  if (Subtarget->is64Bit() &&
      Op.getValueType() == MVT::i64 &&
      Op.getOperand(0).getValueType() != MVT::f80)
    return std::make_pair(SDOperand(), SDOperand());

  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  unsigned Opc;
  switch (Op.getValueType()) {
  default: assert(0 && "Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
  if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, Value, StackSlot,
                         PseudoSourceValue::getFixedStack(), SSFI);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDOperand Ops[] = {
      Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
    };
    Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM
  SDOperand Ops[] = { Chain, Value, StackSlot };
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);

  return std::make_pair(FIST, StackSlot);
}

SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return SDOperand();

  // Load the result.
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}

SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return 0;

  // Return an i64 load from the stack slot.
  SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0);

  // Use a MERGE_VALUES node to drop the chain result value.
  return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val;
}

SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  if (MVT::isVector(VT))
    EltVT = MVT::getVectorElementType(VT);
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(APFloat(APInt(32, ~(1U << 31))));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}

SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  unsigned EltNum = 1;
  if (MVT::isVector(VT)) {
    EltVT = MVT::getVectorElementType(VT);
    EltNum = MVT::getVectorNumElements(VT);
  }
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(APFloat(APInt(64, 1ULL << 63)));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(APFloat(APInt(32, 1U << 31)));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
  if (MVT::isVector(VT)) {
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(ISD::XOR, MVT::v2i64,
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Op.getOperand(0)),
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Mask)));
  } else {
    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
  }
}

SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Op1.getValueType();

  // If the second operand is smaller, extend it first.
  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.
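
  // (For reference: the sign-bit masks built below are 0x8000000000000000 for
  // f64 and 0x80000000 for f32; the clearing masks are their complements.)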
  // First get the sign bit of the second operand.
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                false, 16);
  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift the sign bit right or left if the two operands have different types.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear the sign bit of the first operand.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                false, 16);
  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
4480 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4481} 4482 4483SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4484 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4485 SDOperand Cond; 4486 SDOperand Op0 = Op.getOperand(0); 4487 SDOperand Op1 = Op.getOperand(1); 4488 SDOperand CC = Op.getOperand(2); 4489 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4490 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4491 unsigned X86CC; 4492 4493 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4494 Op0, Op1, DAG)) { 4495 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4496 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4497 DAG.getConstant(X86CC, MVT::i8), Cond); 4498 } 4499 4500 assert(isFP && "Illegal integer SetCC!"); 4501 4502 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4503 switch (SetCCOpcode) { 4504 default: assert(false && "Illegal floating point SetCC!"); 4505 case ISD::SETOEQ: { // !PF & ZF 4506 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4507 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4508 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4509 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4510 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4511 } 4512 case ISD::SETUNE: { // PF | !ZF 4513 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4514 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4515 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4516 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4517 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4518 } 4519 } 4520} 4521 4522 4523SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4524 bool addTest = true; 4525 SDOperand Cond = Op.getOperand(0); 4526 SDOperand CC; 4527 4528 if (Cond.getOpcode() == ISD::SETCC) 4529 Cond = LowerSETCC(Cond, DAG); 4530 4531 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4532 // setting operand in place of the X86ISD::SETCC. 4533 if (Cond.getOpcode() == X86ISD::SETCC) { 4534 CC = Cond.getOperand(0); 4535 4536 SDOperand Cmp = Cond.getOperand(1); 4537 unsigned Opc = Cmp.getOpcode(); 4538 MVT::ValueType VT = Op.getValueType(); 4539 4540 bool IllegalFPCMov = false; 4541 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4542 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4543 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4544 4545 if ((Opc == X86ISD::CMP || 4546 Opc == X86ISD::COMI || 4547 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4548 Cond = Cmp; 4549 addTest = false; 4550 } 4551 } 4552 4553 if (addTest) { 4554 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4555 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4556 } 4557 4558 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4559 MVT::Flag); 4560 SmallVector<SDOperand, 4> Ops; 4561 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4562 // condition is true. 
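  // Hence the operand swap below: for (select cond, a, b) the node is
  // built as CMOV(b, a, cc, flags); the false value goes first so that
  // the true value ends up as operand 1, the value CMOV keeps when the
  // condition holds.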
4563 Ops.push_back(Op.getOperand(2)); 4564 Ops.push_back(Op.getOperand(1)); 4565 Ops.push_back(CC); 4566 Ops.push_back(Cond); 4567 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4568} 4569 4570SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4571 bool addTest = true; 4572 SDOperand Chain = Op.getOperand(0); 4573 SDOperand Cond = Op.getOperand(1); 4574 SDOperand Dest = Op.getOperand(2); 4575 SDOperand CC; 4576 4577 if (Cond.getOpcode() == ISD::SETCC) 4578 Cond = LowerSETCC(Cond, DAG); 4579 4580 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4581 // setting operand in place of the X86ISD::SETCC. 4582 if (Cond.getOpcode() == X86ISD::SETCC) { 4583 CC = Cond.getOperand(0); 4584 4585 SDOperand Cmp = Cond.getOperand(1); 4586 unsigned Opc = Cmp.getOpcode(); 4587 if (Opc == X86ISD::CMP || 4588 Opc == X86ISD::COMI || 4589 Opc == X86ISD::UCOMI) { 4590 Cond = Cmp; 4591 addTest = false; 4592 } 4593 } 4594 4595 if (addTest) { 4596 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4597 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4598 } 4599 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4600 Chain, Dest, CC, Cond); 4601} 4602 4603 4604// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4605// Calls to _alloca are needed to probe the stack when allocating more than 4k 4606// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4607// that the guard pages used by the OS virtual memory manager are allocated in 4608// correct sequence. 4609SDOperand 4610X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4611 SelectionDAG &DAG) { 4612 assert(Subtarget->isTargetCygMing() && 4613 "This should be used only on Cygwin/Mingw targets"); 4614 4615 // Get the inputs. 4616 SDOperand Chain = Op.getOperand(0); 4617 SDOperand Size = Op.getOperand(1); 4618 // FIXME: Ensure alignment here 4619 4620 SDOperand Flag; 4621 4622 MVT::ValueType IntPtr = getPointerTy(); 4623 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 4624 4625 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4626 Flag = Chain.getValue(1); 4627 4628 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4629 SDOperand Ops[] = { Chain, 4630 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4631 DAG.getRegister(X86::EAX, IntPtr), 4632 Flag }; 4633 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4634 Flag = Chain.getValue(1); 4635 4636 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4637 4638 std::vector<MVT::ValueType> Tys; 4639 Tys.push_back(SPTy); 4640 Tys.push_back(MVT::Other); 4641 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4642 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4643} 4644 4645SDOperand 4646X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, 4647 SDOperand Chain, 4648 SDOperand Dst, SDOperand Src, 4649 SDOperand Size, unsigned Align, 4650 const Value *DstSV, uint64_t DstSVOff) { 4651 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4652 4653 /// If not DWORD aligned or size is more than the threshold, call the library. 4654 /// The libc version is likely to be faster for these cases. It can use the 4655 /// address value and run time information about the CPU.
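  /// For example, a memset whose destination is only byte-aligned, whose
  /// size is not a known constant, or whose constant size exceeds the
  /// subtarget's inline threshold is left to libc (or routed to a bzero
  /// entry point when one exists and the stored value is zero); only
  /// small, DWORD-aligned, constant-size cases fall through to the inline
  /// rep;stos expansion below.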
4656 if ((Align & 3) != 0 || 4657 !ConstantSize || 4658 ConstantSize->getValue() > getSubtarget()->getMaxInlineSizeThreshold()) { 4659 SDOperand InFlag(0, 0); 4660 4661 // Check to see if there is a specialized entry-point for memory zeroing. 4662 ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); 4663 if (const char *bzeroEntry = 4664 V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { 4665 MVT::ValueType IntPtr = getPointerTy(); 4666 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4667 TargetLowering::ArgListTy Args; 4668 TargetLowering::ArgListEntry Entry; 4669 Entry.Node = Dst; 4670 Entry.Ty = IntPtrTy; 4671 Args.push_back(Entry); 4672 Entry.Node = Size; 4673 Args.push_back(Entry); 4674 std::pair<SDOperand,SDOperand> CallResult = 4675 LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, 4676 false, DAG.getExternalSymbol(bzeroEntry, IntPtr), 4677 Args, DAG); 4678 return CallResult.second; 4679 } 4680 4681 // Otherwise have the target-independent code call memset. 4682 return SDOperand(); 4683 } 4684 4685 uint64_t SizeVal = ConstantSize->getValue(); 4686 SDOperand InFlag(0, 0); 4687 MVT::ValueType AVT; 4688 SDOperand Count; 4689 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); 4690 unsigned BytesLeft = 0; 4691 bool TwoRepStos = false; 4692 if (ValC) { 4693 unsigned ValReg; 4694 uint64_t Val = ValC->getValue() & 255; 4695 4696 // If the value is a constant, then we can potentially use larger sets. 4697 switch (Align & 3) { 4698 case 2: // WORD aligned 4699 AVT = MVT::i16; 4700 ValReg = X86::AX; 4701 Val = (Val << 8) | Val; 4702 break; 4703 case 0: // DWORD aligned 4704 AVT = MVT::i32; 4705 ValReg = X86::EAX; 4706 Val = (Val << 8) | Val; 4707 Val = (Val << 16) | Val; 4708 if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned 4709 AVT = MVT::i64; 4710 ValReg = X86::RAX; 4711 Val = (Val << 32) | Val; 4712 } 4713 break; 4714 default: // Byte aligned 4715 AVT = MVT::i8; 4716 ValReg = X86::AL; 4717 Count = DAG.getIntPtrConstant(SizeVal); 4718 break; 4719 } 4720 4721 if (AVT > MVT::i8) { 4722 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4723 Count = DAG.getIntPtrConstant(SizeVal / UBytes); 4724 BytesLeft = SizeVal % UBytes; 4725 } 4726 4727 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4728 InFlag); 4729 InFlag = Chain.getValue(1); 4730 } else { 4731 AVT = MVT::i8; 4732 Count = DAG.getIntPtrConstant(SizeVal); 4733 Chain = DAG.getCopyToReg(Chain, X86::AL, Src, InFlag); 4734 InFlag = Chain.getValue(1); 4735 } 4736 4737 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4738 Count, InFlag); 4739 InFlag = Chain.getValue(1); 4740 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4741 Dst, InFlag); 4742 InFlag = Chain.getValue(1); 4743 4744 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4745 SmallVector<SDOperand, 8> Ops; 4746 Ops.push_back(Chain); 4747 Ops.push_back(DAG.getValueType(AVT)); 4748 Ops.push_back(InFlag); 4749 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4750 4751 if (TwoRepStos) { 4752 InFlag = Chain.getValue(1); 4753 Count = Size; 4754 MVT::ValueType CVT = Count.getValueType(); 4755 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4756 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4757 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4758 Left, InFlag); 4759 InFlag = Chain.getValue(1); 4760 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4761 Ops.clear(); 4762 Ops.push_back(Chain); 4763 Ops.push_back(DAG.getValueType(MVT::i8)); 4764 Ops.push_back(InFlag); 4765 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4766 } else if (BytesLeft) { 4767 // Handle the last 1 - 7 bytes. 4768 unsigned Offset = SizeVal - BytesLeft; 4769 MVT::ValueType AddrVT = Dst.getValueType(); 4770 MVT::ValueType SizeVT = Size.getValueType(); 4771 4772 Chain = DAG.getMemset(Chain, 4773 DAG.getNode(ISD::ADD, AddrVT, Dst, 4774 DAG.getConstant(Offset, AddrVT)), 4775 Src, 4776 DAG.getConstant(BytesLeft, SizeVT), 4777 Align, DstSV, DstSVOff + Offset); 4778 } 4779 4780 // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain. 4781 return Chain; 4782} 4783 4784SDOperand 4785X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, 4786 SDOperand Chain, 4787 SDOperand Dst, SDOperand Src, 4788 SDOperand Size, unsigned Align, 4789 bool AlwaysInline, 4790 const Value *DstSV, uint64_t DstSVOff, 4791 const Value *SrcSV, uint64_t SrcSVOff){ 4792 4793 // This requires the copy size to be a constant, preferably 4794 // within a subtarget-specific limit. 4795 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4796 if (!ConstantSize) 4797 return SDOperand(); 4798 uint64_t SizeVal = ConstantSize->getValue(); 4799 if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) 4800 return SDOperand(); 4801 4802 MVT::ValueType AVT; 4803 unsigned BytesLeft = 0; 4804 if (Align >= 8 && Subtarget->is64Bit()) 4805 AVT = MVT::i64; 4806 else if (Align >= 4) 4807 AVT = MVT::i32; 4808 else if (Align >= 2) 4809 AVT = MVT::i16; 4810 else 4811 AVT = MVT::i8; 4812 4813 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4814 unsigned CountVal = SizeVal / UBytes; 4815 SDOperand Count = DAG.getIntPtrConstant(CountVal); 4816 BytesLeft = SizeVal % UBytes; 4817 4818 SDOperand InFlag(0, 0); 4819 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4820 Count, InFlag); 4821 InFlag = Chain.getValue(1); 4822 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4823 Dst, InFlag); 4824 InFlag = Chain.getValue(1); 4825 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4826 Src, InFlag); 4827 InFlag = Chain.getValue(1); 4828 4829 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4830 SmallVector<SDOperand, 8> Ops; 4831 Ops.push_back(Chain); 4832 Ops.push_back(DAG.getValueType(AVT)); 4833 Ops.push_back(InFlag); 4834 SDOperand RepMovs = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4835 4836 SmallVector<SDOperand, 4> Results; 4837 Results.push_back(RepMovs); 4838 if (BytesLeft) { 4839 // Handle the last 1 - 7 bytes.
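    // Concretely: a 25-byte copy with 8-byte alignment on x86-64 runs
    // rep;movsq with RCX = 25 / 8 = 3 (24 bytes), leaving BytesLeft = 1
    // to be copied by the recursive getMemcpy at Offset = 24 below.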
4840 unsigned Offset = SizeVal - BytesLeft; 4841 MVT::ValueType DstVT = Dst.getValueType(); 4842 MVT::ValueType SrcVT = Src.getValueType(); 4843 MVT::ValueType SizeVT = Size.getValueType(); 4844 Results.push_back(DAG.getMemcpy(Chain, 4845 DAG.getNode(ISD::ADD, DstVT, Dst, 4846 DAG.getConstant(Offset, DstVT)), 4847 DAG.getNode(ISD::ADD, SrcVT, Src, 4848 DAG.getConstant(Offset, SrcVT)), 4849 DAG.getConstant(BytesLeft, SizeVT), 4850 Align, AlwaysInline, 4851 DstSV, DstSVOff + Offset, 4852 SrcSV, SrcSVOff + Offset)); 4853 } 4854 4855 return DAG.getNode(ISD::TokenFactor, MVT::Other, &Results[0], Results.size()); 4856} 4857 4858/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4859SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4860 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4861 SDOperand TheChain = N->getOperand(0); 4862 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4863 if (Subtarget->is64Bit()) { 4864 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4865 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4866 MVT::i64, rax.getValue(2)); 4867 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4868 DAG.getConstant(32, MVT::i8)); 4869 SDOperand Ops[] = { 4870 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4871 }; 4872 4873 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4874 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4875 } 4876 4877 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4878 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4879 MVT::i32, eax.getValue(2)); 4880 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4881 SDOperand Ops[] = { eax, edx }; 4882 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4883 4884 // Use a MERGE_VALUES to return the value and chain. 4885 Ops[1] = edx.getValue(1); 4886 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4887 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4888} 4889 4890SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4891 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4892 4893 if (!Subtarget->is64Bit()) { 4894 // vastart just stores the address of the VarArgsFrameIndex slot into the 4895 // memory location argument. 4896 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4897 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4898 } 4899 4900 // __va_list_tag: 4901 // gp_offset (0 - 6 * 8) 4902 // fp_offset (48 - 48 + 8 * 16) 4903 // overflow_arg_area (point to parameters coming in memory). 4904 // reg_save_area 4905 SmallVector<SDOperand, 8> MemOps; 4906 SDOperand FIN = Op.getOperand(1); 4907 // Store gp_offset 4908 SDOperand Store = DAG.getStore(Op.getOperand(0), 4909 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4910 FIN, SV, 0); 4911 MemOps.push_back(Store); 4912 4913 // Store fp_offset 4914 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4915 Store = DAG.getStore(Op.getOperand(0), 4916 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4917 FIN, SV, 0); 4918 MemOps.push_back(Store); 4919 4920 // Store ptr to overflow_arg_area 4921 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4922 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4923 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4924 MemOps.push_back(Store); 4925 4926 // Store ptr to reg_save_area. 
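  // Layout recap for the 24-byte __va_list_tag: gp_offset at byte 0,
  // fp_offset at byte 4, overflow_arg_area at byte 8 and reg_save_area at
  // byte 16, which is why FIN advances by 4, 4, and finally 8 across the
  // three ADDs.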
4927 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4928 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4929 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4930 MemOps.push_back(Store); 4931 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4932} 4933 4934SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4935 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4936 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 4937 SDOperand Chain = Op.getOperand(0); 4938 SDOperand DstPtr = Op.getOperand(1); 4939 SDOperand SrcPtr = Op.getOperand(2); 4940 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4941 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4942 4943 return DAG.getMemcpy(Chain, DstPtr, SrcPtr, 4944 DAG.getIntPtrConstant(24), 8, false, 4945 DstSV, 0, SrcSV, 0); 4946} 4947 4948SDOperand 4949X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4950 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4951 switch (IntNo) { 4952 default: return SDOperand(); // Don't custom lower most intrinsics. 4953 // Comparison intrinsics. 4954 case Intrinsic::x86_sse_comieq_ss: 4955 case Intrinsic::x86_sse_comilt_ss: 4956 case Intrinsic::x86_sse_comile_ss: 4957 case Intrinsic::x86_sse_comigt_ss: 4958 case Intrinsic::x86_sse_comige_ss: 4959 case Intrinsic::x86_sse_comineq_ss: 4960 case Intrinsic::x86_sse_ucomieq_ss: 4961 case Intrinsic::x86_sse_ucomilt_ss: 4962 case Intrinsic::x86_sse_ucomile_ss: 4963 case Intrinsic::x86_sse_ucomigt_ss: 4964 case Intrinsic::x86_sse_ucomige_ss: 4965 case Intrinsic::x86_sse_ucomineq_ss: 4966 case Intrinsic::x86_sse2_comieq_sd: 4967 case Intrinsic::x86_sse2_comilt_sd: 4968 case Intrinsic::x86_sse2_comile_sd: 4969 case Intrinsic::x86_sse2_comigt_sd: 4970 case Intrinsic::x86_sse2_comige_sd: 4971 case Intrinsic::x86_sse2_comineq_sd: 4972 case Intrinsic::x86_sse2_ucomieq_sd: 4973 case Intrinsic::x86_sse2_ucomilt_sd: 4974 case Intrinsic::x86_sse2_ucomile_sd: 4975 case Intrinsic::x86_sse2_ucomigt_sd: 4976 case Intrinsic::x86_sse2_ucomige_sd: 4977 case Intrinsic::x86_sse2_ucomineq_sd: { 4978 unsigned Opc = 0; 4979 ISD::CondCode CC = ISD::SETCC_INVALID; 4980 switch (IntNo) { 4981 default: break; 4982 case Intrinsic::x86_sse_comieq_ss: 4983 case Intrinsic::x86_sse2_comieq_sd: 4984 Opc = X86ISD::COMI; 4985 CC = ISD::SETEQ; 4986 break; 4987 case Intrinsic::x86_sse_comilt_ss: 4988 case Intrinsic::x86_sse2_comilt_sd: 4989 Opc = X86ISD::COMI; 4990 CC = ISD::SETLT; 4991 break; 4992 case Intrinsic::x86_sse_comile_ss: 4993 case Intrinsic::x86_sse2_comile_sd: 4994 Opc = X86ISD::COMI; 4995 CC = ISD::SETLE; 4996 break; 4997 case Intrinsic::x86_sse_comigt_ss: 4998 case Intrinsic::x86_sse2_comigt_sd: 4999 Opc = X86ISD::COMI; 5000 CC = ISD::SETGT; 5001 break; 5002 case Intrinsic::x86_sse_comige_ss: 5003 case Intrinsic::x86_sse2_comige_sd: 5004 Opc = X86ISD::COMI; 5005 CC = ISD::SETGE; 5006 break; 5007 case Intrinsic::x86_sse_comineq_ss: 5008 case Intrinsic::x86_sse2_comineq_sd: 5009 Opc = X86ISD::COMI; 5010 CC = ISD::SETNE; 5011 break; 5012 case Intrinsic::x86_sse_ucomieq_ss: 5013 case Intrinsic::x86_sse2_ucomieq_sd: 5014 Opc = X86ISD::UCOMI; 5015 CC = ISD::SETEQ; 5016 break; 5017 case Intrinsic::x86_sse_ucomilt_ss: 5018 case Intrinsic::x86_sse2_ucomilt_sd: 5019 Opc = X86ISD::UCOMI; 5020 CC = ISD::SETLT; 5021 break; 5022 case Intrinsic::x86_sse_ucomile_ss: 5023 case 
Intrinsic::x86_sse2_ucomile_sd: 5024 Opc = X86ISD::UCOMI; 5025 CC = ISD::SETLE; 5026 break; 5027 case Intrinsic::x86_sse_ucomigt_ss: 5028 case Intrinsic::x86_sse2_ucomigt_sd: 5029 Opc = X86ISD::UCOMI; 5030 CC = ISD::SETGT; 5031 break; 5032 case Intrinsic::x86_sse_ucomige_ss: 5033 case Intrinsic::x86_sse2_ucomige_sd: 5034 Opc = X86ISD::UCOMI; 5035 CC = ISD::SETGE; 5036 break; 5037 case Intrinsic::x86_sse_ucomineq_ss: 5038 case Intrinsic::x86_sse2_ucomineq_sd: 5039 Opc = X86ISD::UCOMI; 5040 CC = ISD::SETNE; 5041 break; 5042 } 5043 5044 unsigned X86CC; 5045 SDOperand LHS = Op.getOperand(1); 5046 SDOperand RHS = Op.getOperand(2); 5047 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5048 5049 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5050 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5051 DAG.getConstant(X86CC, MVT::i8), Cond); 5052 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5053 } 5054 5055 // Fix vector shift instructions where the last operand is a non-immediate 5056 // i32 value. 5057 case Intrinsic::x86_sse2_pslli_w: 5058 case Intrinsic::x86_sse2_pslli_d: 5059 case Intrinsic::x86_sse2_pslli_q: 5060 case Intrinsic::x86_sse2_psrli_w: 5061 case Intrinsic::x86_sse2_psrli_d: 5062 case Intrinsic::x86_sse2_psrli_q: 5063 case Intrinsic::x86_sse2_psrai_w: 5064 case Intrinsic::x86_sse2_psrai_d: 5065 case Intrinsic::x86_mmx_pslli_w: 5066 case Intrinsic::x86_mmx_pslli_d: 5067 case Intrinsic::x86_mmx_pslli_q: 5068 case Intrinsic::x86_mmx_psrli_w: 5069 case Intrinsic::x86_mmx_psrli_d: 5070 case Intrinsic::x86_mmx_psrli_q: 5071 case Intrinsic::x86_mmx_psrai_w: 5072 case Intrinsic::x86_mmx_psrai_d: { 5073 SDOperand ShAmt = Op.getOperand(2); 5074 if (isa<ConstantSDNode>(ShAmt)) 5075 return SDOperand(); 5076 5077 unsigned NewIntNo = 0; 5078 MVT::ValueType ShAmtVT = MVT::v4i32; 5079 switch (IntNo) { 5080 case Intrinsic::x86_sse2_pslli_w: 5081 NewIntNo = Intrinsic::x86_sse2_psll_w; 5082 break; 5083 case Intrinsic::x86_sse2_pslli_d: 5084 NewIntNo = Intrinsic::x86_sse2_psll_d; 5085 break; 5086 case Intrinsic::x86_sse2_pslli_q: 5087 NewIntNo = Intrinsic::x86_sse2_psll_q; 5088 break; 5089 case Intrinsic::x86_sse2_psrli_w: 5090 NewIntNo = Intrinsic::x86_sse2_psrl_w; 5091 break; 5092 case Intrinsic::x86_sse2_psrli_d: 5093 NewIntNo = Intrinsic::x86_sse2_psrl_d; 5094 break; 5095 case Intrinsic::x86_sse2_psrli_q: 5096 NewIntNo = Intrinsic::x86_sse2_psrl_q; 5097 break; 5098 case Intrinsic::x86_sse2_psrai_w: 5099 NewIntNo = Intrinsic::x86_sse2_psra_w; 5100 break; 5101 case Intrinsic::x86_sse2_psrai_d: 5102 NewIntNo = Intrinsic::x86_sse2_psra_d; 5103 break; 5104 default: { 5105 ShAmtVT = MVT::v2i32; 5106 switch (IntNo) { 5107 case Intrinsic::x86_mmx_pslli_w: 5108 NewIntNo = Intrinsic::x86_mmx_psll_w; 5109 break; 5110 case Intrinsic::x86_mmx_pslli_d: 5111 NewIntNo = Intrinsic::x86_mmx_psll_d; 5112 break; 5113 case Intrinsic::x86_mmx_pslli_q: 5114 NewIntNo = Intrinsic::x86_mmx_psll_q; 5115 break; 5116 case Intrinsic::x86_mmx_psrli_w: 5117 NewIntNo = Intrinsic::x86_mmx_psrl_w; 5118 break; 5119 case Intrinsic::x86_mmx_psrli_d: 5120 NewIntNo = Intrinsic::x86_mmx_psrl_d; 5121 break; 5122 case Intrinsic::x86_mmx_psrli_q: 5123 NewIntNo = Intrinsic::x86_mmx_psrl_q; 5124 break; 5125 case Intrinsic::x86_mmx_psrai_w: 5126 NewIntNo = Intrinsic::x86_mmx_psra_w; 5127 break; 5128 case Intrinsic::x86_mmx_psrai_d: 5129 NewIntNo = Intrinsic::x86_mmx_psra_d; 5130 break; 5131 default: abort(); // Can't reach here. 
5132 } 5133 break; 5134 } 5135 } 5136 MVT::ValueType VT = Op.getValueType(); 5137 ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT, 5138 DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt)); 5139 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT, 5140 DAG.getConstant(NewIntNo, MVT::i32), 5141 Op.getOperand(1), ShAmt); 5142 } 5143 } 5144} 5145 5146SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5147 // Depths > 0 not supported yet! 5148 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5149 return SDOperand(); 5150 5151 // Just load the return address 5152 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5153 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5154} 5155 5156SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5157 // Depths > 0 not supported yet! 5158 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5159 return SDOperand(); 5160 5161 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5162 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5163 DAG.getIntPtrConstant(4)); 5164} 5165 5166SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5167 SelectionDAG &DAG) { 5168 // Is not yet supported on x86-64 5169 if (Subtarget->is64Bit()) 5170 return SDOperand(); 5171 5172 return DAG.getIntPtrConstant(8); 5173} 5174 5175SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5176{ 5177 assert(!Subtarget->is64Bit() && 5178 "Lowering of eh_return builtin is not supported yet on x86-64"); 5179 5180 MachineFunction &MF = DAG.getMachineFunction(); 5181 SDOperand Chain = Op.getOperand(0); 5182 SDOperand Offset = Op.getOperand(1); 5183 SDOperand Handler = Op.getOperand(2); 5184 5185 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5186 getPointerTy()); 5187 5188 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5189 DAG.getIntPtrConstant(-4UL)); 5190 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5191 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5192 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5193 MF.getRegInfo().addLiveOut(X86::ECX); 5194 5195 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5196 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5197} 5198 5199SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5200 SelectionDAG &DAG) { 5201 SDOperand Root = Op.getOperand(0); 5202 SDOperand Trmp = Op.getOperand(1); // trampoline 5203 SDOperand FPtr = Op.getOperand(2); // nested function 5204 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5205 5206 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5207 5208 const X86InstrInfo *TII = 5209 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5210 5211 if (Subtarget->is64Bit()) { 5212 SDOperand OutChains[6]; 5213 5214 // Large code-model. 5215 5216 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5217 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5218 5219 const unsigned char N86R10 = 5220 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5221 const unsigned char N86R11 = 5222 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5223 5224 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5225 5226 // Load the pointer to the nested function into R11. 
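    // Byte layout of the 64-bit trampoline assembled below:
    //   offset  0: 49 BB <imm64 FPtr>   movabsq $fptr, %r11
    //   offset 10: 49 BA <imm64 Nest>   movabsq $nest, %r10
    //   offset 20: 49 FF E3             jmpq *%r11  (the REX.W bit is
    //                                   harmlessly ignored by jmp)
    // hence the six stores at offsets 0, 2, 10, 12, 20 and 22.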
5227 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5228 SDOperand Addr = Trmp; 5229 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5230 TrmpAddr, 0); 5231 5232 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5233 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5234 5235 // Load the 'nest' parameter value into R10. 5236 // R10 is specified in X86CallingConv.td 5237 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5238 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5239 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5240 TrmpAddr, 10); 5241 5242 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5243 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5244 5245 // Jump to the nested function. 5246 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5247 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5248 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5249 TrmpAddr, 20); 5250 5251 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5252 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5253 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5254 TrmpAddr, 22); 5255 5256 SDOperand Ops[] = 5257 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5258 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5259 } else { 5260 const Function *Func = 5261 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5262 unsigned CC = Func->getCallingConv(); 5263 unsigned NestReg; 5264 5265 switch (CC) { 5266 default: 5267 assert(0 && "Unsupported calling convention"); 5268 case CallingConv::C: 5269 case CallingConv::X86_StdCall: { 5270 // Pass 'nest' parameter in ECX. 5271 // Must be kept in sync with X86CallingConv.td 5272 NestReg = X86::ECX; 5273 5274 // Check that ECX wasn't needed by an 'inreg' parameter. 5275 const FunctionType *FTy = Func->getFunctionType(); 5276 const PAListPtr &Attrs = Func->getParamAttrs(); 5277 5278 if (!Attrs.isEmpty() && !Func->isVarArg()) { 5279 unsigned InRegCount = 0; 5280 unsigned Idx = 1; 5281 5282 for (FunctionType::param_iterator I = FTy->param_begin(), 5283 E = FTy->param_end(); I != E; ++I, ++Idx) 5284 if (Attrs.paramHasAttr(Idx, ParamAttr::InReg)) 5285 // FIXME: should only count parameters that are lowered to integers. 5286 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5287 5288 if (InRegCount > 2) { 5289 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5290 abort(); 5291 } 5292 } 5293 break; 5294 } 5295 case CallingConv::X86_FastCall: 5296 // Pass 'nest' parameter in EAX. 
5297 // Must be kept in sync with X86CallingConv.td 5298 NestReg = X86::EAX; 5299 break; 5300 } 5301 5302 SDOperand OutChains[4]; 5303 SDOperand Addr, Disp; 5304 5305 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5306 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5307 5308 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5309 const unsigned char N86Reg = 5310 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg); 5311 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5312 Trmp, TrmpAddr, 0); 5313 5314 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5315 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1); 5316 5317 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5318 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5319 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5320 TrmpAddr, 5, false, 1); 5321 5322 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5323 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); 5324 5325 SDOperand Ops[] = 5326 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5327 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5328 } 5329} 5330 5331SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 5332 /* 5333 The rounding mode is in bits 11:10 of FPSR, and has the following 5334 settings: 5335 00 Round to nearest 5336 01 Round to -inf 5337 10 Round to +inf 5338 11 Round to 0 5339 5340 FLT_ROUNDS, on the other hand, expects the following: 5341 -1 Undefined 5342 0 Round to 0 5343 1 Round to nearest 5344 2 Round to +inf 5345 3 Round to -inf 5346 5347 To perform the conversion, we do: 5348 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5349 */ 5350 5351 MachineFunction &MF = DAG.getMachineFunction(); 5352 const TargetMachine &TM = MF.getTarget(); 5353 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5354 unsigned StackAlignment = TFI.getStackAlignment(); 5355 MVT::ValueType VT = Op.getValueType(); 5356 5357 // Save FP Control Word to stack slot 5358 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5359 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5360 5361 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5362 DAG.getEntryNode(), StackSlot); 5363 5364 // Load FP Control Word from stack slot 5365 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5366 5367 // Transform as necessary 5368 SDOperand CWD1 = 5369 DAG.getNode(ISD::SRL, MVT::i16, 5370 DAG.getNode(ISD::AND, MVT::i16, 5371 CWD, DAG.getConstant(0x800, MVT::i16)), 5372 DAG.getConstant(11, MVT::i8)); 5373 SDOperand CWD2 = 5374 DAG.getNode(ISD::SRL, MVT::i16, 5375 DAG.getNode(ISD::AND, MVT::i16, 5376 CWD, DAG.getConstant(0x400, MVT::i16)), 5377 DAG.getConstant(9, MVT::i8)); 5378 5379 SDOperand RetVal = 5380 DAG.getNode(ISD::AND, MVT::i16, 5381 DAG.getNode(ISD::ADD, MVT::i16, 5382 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5383 DAG.getConstant(1, MVT::i16)), 5384 DAG.getConstant(3, MVT::i16)); 5385 5386 5387 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 
5388 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5389} 5390 5391SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5392 MVT::ValueType VT = Op.getValueType(); 5393 MVT::ValueType OpVT = VT; 5394 unsigned NumBits = MVT::getSizeInBits(VT); 5395 5396 Op = Op.getOperand(0); 5397 if (VT == MVT::i8) { 5398 // Zero extend to i32 since there is not an i8 bsr. 5399 OpVT = MVT::i32; 5400 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5401 } 5402 5403 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5404 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5405 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5406 5407 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5408 SmallVector<SDOperand, 4> Ops; 5409 Ops.push_back(Op); 5410 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5411 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5412 Ops.push_back(Op.getValue(1)); 5413 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5414 5415 // Finally xor with NumBits-1. 5416 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5417 5418 if (VT == MVT::i8) 5419 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5420 return Op; 5421} 5422 5423SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5424 MVT::ValueType VT = Op.getValueType(); 5425 MVT::ValueType OpVT = VT; 5426 unsigned NumBits = MVT::getSizeInBits(VT); 5427 5428 Op = Op.getOperand(0); 5429 if (VT == MVT::i8) { 5430 OpVT = MVT::i32; 5431 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5432 } 5433 5434 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5435 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5436 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5437 5438 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5439 SmallVector<SDOperand, 4> Ops; 5440 Ops.push_back(Op); 5441 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5442 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5443 Ops.push_back(Op.getValue(1)); 5444 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5445 5446 if (VT == MVT::i8) 5447 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5448 return Op; 5449} 5450 5451SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5452 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5453 unsigned Reg = 0; 5454 unsigned size = 0; 5455 switch(T) { 5456 case MVT::i8: Reg = X86::AL; size = 1; break; 5457 case MVT::i16: Reg = X86::AX; size = 2; break; 5458 case MVT::i32: Reg = X86::EAX; size = 4; break; 5459 case MVT::i64: 5460 if (Subtarget->is64Bit()) { 5461 Reg = X86::RAX; size = 8; 5462 } else //Should go away when LowerType stuff lands 5463 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5464 break; 5465 }; 5466 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5467 Op.getOperand(3), SDOperand()); 5468 SDOperand Ops[] = { cpIn.getValue(0), 5469 Op.getOperand(1), 5470 Op.getOperand(2), 5471 DAG.getTargetConstant(size, MVT::i8), 5472 cpIn.getValue(1) }; 5473 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5474 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5475 SDOperand cpOut = 5476 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5477 return cpOut; 5478} 5479 5480SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5481 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5482 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5483 SDOperand cpInL, cpInH; 5484 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5485 DAG.getConstant(0, 
MVT::i32)); 5486 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5487 DAG.getConstant(1, MVT::i32)); 5488 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5489 cpInL, SDOperand()); 5490 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5491 cpInH, cpInL.getValue(1)); 5492 SDOperand swapInL, swapInH; 5493 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5494 DAG.getConstant(0, MVT::i32)); 5495 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5496 DAG.getConstant(1, MVT::i32)); 5497 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5498 swapInL, cpInH.getValue(1)); 5499 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5500 swapInH, swapInL.getValue(1)); 5501 SDOperand Ops[] = { swapInH.getValue(0), 5502 Op->getOperand(1), 5503 swapInH.getValue(1)}; 5504 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5505 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5506 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5507 Result.getValue(1)); 5508 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5509 cpOutL.getValue(2)); 5510 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5511 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5512 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5513 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5514} 5515 5516SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) { 5517 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5518 assert (T == MVT::i32 && "Only know how to expand i32 LSS"); 5519 SDOperand negOp = DAG.getNode(ISD::SUB, T, 5520 DAG.getConstant(0, T), Op->getOperand(2)); 5521 return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0), 5522 Op->getOperand(1), negOp, T).Val; 5523} 5524 5525/// LowerOperation - Provide custom lowering hooks for some operations. 
5526/// 5527SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5528 switch (Op.getOpcode()) { 5529 default: assert(0 && "Should not custom lower this!"); 5530 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5531 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5532 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5533 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5534 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5535 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5536 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5537 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5538 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5539 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5540 case ISD::SHL_PARTS: 5541 case ISD::SRA_PARTS: 5542 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5543 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5544 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5545 case ISD::FABS: return LowerFABS(Op, DAG); 5546 case ISD::FNEG: return LowerFNEG(Op, DAG); 5547 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5548 case ISD::SETCC: return LowerSETCC(Op, DAG); 5549 case ISD::SELECT: return LowerSELECT(Op, DAG); 5550 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5551 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5552 case ISD::CALL: return LowerCALL(Op, DAG); 5553 case ISD::RET: return LowerRET(Op, DAG); 5554 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5555 case ISD::VASTART: return LowerVASTART(Op, DAG); 5556 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5557 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5558 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5559 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5560 case ISD::FRAME_TO_ARGS_OFFSET: 5561 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5562 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5563 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5564 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5565 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5566 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5567 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5568 5569 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5570 case ISD::READCYCLECOUNTER: 5571 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5572 } 5573} 5574 5575/// ExpandOperation - Provide custom lowering hooks for expanding operations. 
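/// These hooks fire when the legalizer finds one of these nodes producing
/// an illegal result type (e.g. an i64 FP_TO_SINT or i64 cmpxchg on a
/// 32-bit target) and must expand it into legal pieces rather than
/// re-lower an already legal node in place.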
5576SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5577 switch (N->getOpcode()) { 5578 default: assert(0 && "Should not custom lower this!"); 5579 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5580 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5581 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5582 case ISD::ATOMIC_LSS: return ExpandATOMIC_LSS(N,DAG); 5583 } 5584} 5585 5586const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5587 switch (Opcode) { 5588 default: return NULL; 5589 case X86ISD::BSF: return "X86ISD::BSF"; 5590 case X86ISD::BSR: return "X86ISD::BSR"; 5591 case X86ISD::SHLD: return "X86ISD::SHLD"; 5592 case X86ISD::SHRD: return "X86ISD::SHRD"; 5593 case X86ISD::FAND: return "X86ISD::FAND"; 5594 case X86ISD::FOR: return "X86ISD::FOR"; 5595 case X86ISD::FXOR: return "X86ISD::FXOR"; 5596 case X86ISD::FSRL: return "X86ISD::FSRL"; 5597 case X86ISD::FILD: return "X86ISD::FILD"; 5598 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5599 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5600 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5601 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5602 case X86ISD::FLD: return "X86ISD::FLD"; 5603 case X86ISD::FST: return "X86ISD::FST"; 5604 case X86ISD::CALL: return "X86ISD::CALL"; 5605 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5606 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5607 case X86ISD::CMP: return "X86ISD::CMP"; 5608 case X86ISD::COMI: return "X86ISD::COMI"; 5609 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5610 case X86ISD::SETCC: return "X86ISD::SETCC"; 5611 case X86ISD::CMOV: return "X86ISD::CMOV"; 5612 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5613 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5614 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5615 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5616 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5617 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5618 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5619 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5620 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5621 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5622 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5623 case X86ISD::FMAX: return "X86ISD::FMAX"; 5624 case X86ISD::FMIN: return "X86ISD::FMIN"; 5625 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5626 case X86ISD::FRCP: return "X86ISD::FRCP"; 5627 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5628 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5629 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5630 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5631 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5632 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 5633 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 5634 } 5635} 5636 5637// isLegalAddressingMode - Return true if the addressing mode represented 5638// by AM is legal for this target, for a load/store of the specified type. 5639bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5640 const Type *Ty) const { 5641 // X86 supports extremely general addressing modes. 5642 5643 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5644 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5645 return false; 5646 5647 if (AM.BaseGV) { 5648 // We can only fold this if we don't need an extra load.
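    // ("Extra load" means the global is reached indirectly through a GOT
    // or stub entry, as in PIC code; the pointer then comes out of a load
    // and can no longer be folded into the [base + scale*index + disp]
    // addressing form.)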
5649 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5650 return false; 5651 5652 // X86-64 only supports addr of globals in small code model. 5653 if (Subtarget->is64Bit()) { 5654 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5655 return false; 5656 // If lower 4G is not available, then we must use rip-relative addressing. 5657 if (AM.BaseOffs || AM.Scale > 1) 5658 return false; 5659 } 5660 } 5661 5662 switch (AM.Scale) { 5663 case 0: 5664 case 1: 5665 case 2: 5666 case 4: 5667 case 8: 5668 // These scales always work. 5669 break; 5670 case 3: 5671 case 5: 5672 case 9: 5673 // These scales are formed with basereg+scalereg. Only accept if there is 5674 // no basereg yet. 5675 if (AM.HasBaseReg) 5676 return false; 5677 break; 5678 default: // Other stuff never works. 5679 return false; 5680 } 5681 5682 return true; 5683} 5684 5685 5686bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5687 if (!Ty1->isInteger() || !Ty2->isInteger()) 5688 return false; 5689 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5690 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5691 if (NumBits1 <= NumBits2) 5692 return false; 5693 return Subtarget->is64Bit() || NumBits1 < 64; 5694} 5695 5696bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5697 MVT::ValueType VT2) const { 5698 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5699 return false; 5700 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5701 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5702 if (NumBits1 <= NumBits2) 5703 return false; 5704 return Subtarget->is64Bit() || NumBits1 < 64; 5705} 5706 5707/// isShuffleMaskLegal - Targets can use this to indicate that they only 5708/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5709/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5710/// are assumed to be legal. 5711bool 5712X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5713 // Only do shuffles on 128-bit vector types for now. 5714 if (MVT::getSizeInBits(VT) == 64) return false; 5715 return (Mask.Val->getNumOperands() <= 4 || 5716 isIdentityMask(Mask.Val) || 5717 isIdentityMask(Mask.Val, true) || 5718 isSplatMask(Mask.Val) || 5719 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5720 X86::isUNPCKLMask(Mask.Val) || 5721 X86::isUNPCKHMask(Mask.Val) || 5722 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5723 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5724} 5725 5726bool 5727X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, 5728 MVT::ValueType EVT, 5729 SelectionDAG &DAG) const { 5730 unsigned NumElts = BVOps.size(); 5731 // Only do shuffles on 128-bit vector types for now. 
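  // (EVT is the element type here, so EVT-size * NumElts is the width of
  // the whole vector; 64 bits would be an MMX-sized vector, rejected just
  // as in isShuffleMaskLegal above.)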
5732 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5733 if (NumElts == 2) return true; 5734 if (NumElts == 4) { 5735 return (isMOVLMask(&BVOps[0], 4) || 5736 isCommutedMOVL(&BVOps[0], 4, true) || 5737 isSHUFPMask(&BVOps[0], 4) || 5738 isCommutedSHUFP(&BVOps[0], 4)); 5739 } 5740 return false; 5741} 5742 5743//===----------------------------------------------------------------------===// 5744// X86 Scheduler Hooks 5745//===----------------------------------------------------------------------===// 5746 5747// private utility function 5748MachineBasicBlock * 5749X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 5750 MachineBasicBlock *MBB, 5751 unsigned regOpc, 5752 unsigned immOpc) { 5753 // For the atomic bitwise operator, we generate 5754 // thisMBB: 5755 // newMBB: 5756 // ld EAX = [bitinstr.addr] 5757 // mov t1 = EAX 5758 // op t2 = t1, [bitinstr.val] 5759 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 5760 // bz newMBB 5761 // fallthrough -->nextMBB 5762 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5763 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 5764 ilist<MachineBasicBlock>::iterator MBBIter = MBB; 5765 ++MBBIter; 5766 5767 /// First build the CFG 5768 MachineFunction *F = MBB->getParent(); 5769 MachineBasicBlock *thisMBB = MBB; 5770 MachineBasicBlock *newMBB = new MachineBasicBlock(LLVM_BB); 5771 MachineBasicBlock *nextMBB = new MachineBasicBlock(LLVM_BB); 5772 F->getBasicBlockList().insert(MBBIter, newMBB); 5773 F->getBasicBlockList().insert(MBBIter, nextMBB); 5774 5775 // Move all successors of thisMBB to nextMBB 5776 nextMBB->transferSuccessors(thisMBB); 5777 5778 // Update thisMBB to fall through to newMBB 5779 thisMBB->addSuccessor(newMBB); 5780 5781 // newMBB jumps to itself and falls through to nextMBB 5782 newMBB->addSuccessor(nextMBB); 5783 newMBB->addSuccessor(newMBB); 5784 5785 // Insert instructions into newMBB based on incoming instruction 5786 assert(bInstr->getNumOperands() < 8 && "unexpected number of operands"); 5787 MachineOperand& destOper = bInstr->getOperand(0); 5788 MachineOperand* argOpers[6]; 5789 int numArgs = bInstr->getNumOperands() - 1; 5790 for (int i=0; i < numArgs; ++i) 5791 argOpers[i] = &bInstr->getOperand(i+1); 5792 5793 // x86 address has 4 operands: base, scale, index, and displacement 5794 int lastAddrIndx = 3; // [0,3] 5795 int valArgIndx = 4; 5796 5797 MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), X86::EAX); 5798 for (int i=0; i <= lastAddrIndx; ++i) 5799 (*MIB).addOperand(*argOpers[i]); 5800 5801 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5802 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t1); 5803 MIB.addReg(X86::EAX); 5804 5805 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5806 assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm()) 5807 && "invalid operand"); 5808 if (argOpers[valArgIndx]->isReg()) 5809 MIB = BuildMI(newMBB, TII->get(regOpc), t2); 5810 else 5811 MIB = BuildMI(newMBB, TII->get(immOpc), t2); 5812 MIB.addReg(t1); 5813 (*MIB).addOperand(*argOpers[valArgIndx]); 5814 5815 MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG32)); 5816 for (int i=0; i <= lastAddrIndx; ++i) 5817 (*MIB).addOperand(*argOpers[i]); 5818 MIB.addReg(t2); 5819 5820 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), destOper.getReg()); 5821 MIB.addReg(X86::EAX); 5822 5823 // insert branch 5824 BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB); 5825 5826 delete bInstr; // The pseudo instruction is gone now. 5827 return nextMBB; 5828}
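// Both atomic emitters use the same optimistic retry idiom: load the old
// value into EAX, compute the replacement into a vreg, then let lock
// cmpxchg (LCMPXCHG32) publish it only if memory still equals EAX.
// Roughly, the generated loop is:
//   newMBB:
//     movl (addr), %eax
//     movl %eax, t1
//     op   t2 = t1, val
//     lock cmpxchgl t2, (addr)   ; succeeds only if (addr) == %eax
//     jne  newMBB                ; ZF clear means someone raced us; retry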
5829 5830// private utility function 5831MachineBasicBlock * 5832X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, 5833 MachineBasicBlock *MBB, 5834 unsigned cmovOpc) { 5835 // For the atomic min/max operator, we generate 5836 // thisMBB: 5837 // newMBB: 5838 // ld EAX = [min/max.addr] 5839 // mov t1 = EAX 5840 // mov t2 = [min/max.val] 5841 // cmp t1, t2 5842 // cmov[cond] t2 = t1 5843 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 5844 // bz newMBB 5845 // fallthrough -->nextMBB 5846 // 5847 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5848 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 5849 ilist<MachineBasicBlock>::iterator MBBIter = MBB; 5850 ++MBBIter; 5851 5852 /// First build the CFG 5853 MachineFunction *F = MBB->getParent(); 5854 MachineBasicBlock *thisMBB = MBB; 5855 MachineBasicBlock *newMBB = new MachineBasicBlock(LLVM_BB); 5856 MachineBasicBlock *nextMBB = new MachineBasicBlock(LLVM_BB); 5857 F->getBasicBlockList().insert(MBBIter, newMBB); 5858 F->getBasicBlockList().insert(MBBIter, nextMBB); 5859 5860 // Move all successors of thisMBB to nextMBB 5861 nextMBB->transferSuccessors(thisMBB); 5862 5863 // Update thisMBB to fall through to newMBB 5864 thisMBB->addSuccessor(newMBB); 5865 5866 // newMBB jumps to itself and falls through to nextMBB 5867 newMBB->addSuccessor(nextMBB); 5868 newMBB->addSuccessor(newMBB); 5869 5870 // Insert instructions into newMBB based on incoming instruction 5871 assert(mInstr->getNumOperands() < 8 && "unexpected number of operands"); 5872 MachineOperand& destOper = mInstr->getOperand(0); 5873 MachineOperand* argOpers[6]; 5874 int numArgs = mInstr->getNumOperands() - 1; 5875 for (int i=0; i < numArgs; ++i) 5876 argOpers[i] = &mInstr->getOperand(i+1); 5877 5878 // x86 address has 4 operands: base, scale, index, and displacement 5879 int lastAddrIndx = 3; // [0,3] 5880 int valArgIndx = 4; 5881 5882 MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), X86::EAX); 5883 for (int i=0; i <= lastAddrIndx; ++i) 5884 (*MIB).addOperand(*argOpers[i]); 5885 5886 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5887 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t1); 5888 MIB.addReg(X86::EAX); 5889 5890 // We only support register and immediate values 5891 assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm()) 5892 && "invalid operand"); 5893 5894 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5895 if (argOpers[valArgIndx]->isReg()) 5896 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2); 5897 else 5898 MIB = BuildMI(newMBB, TII->get(X86::MOV32ri), t2); // immediate form 5899 (*MIB).addOperand(*argOpers[valArgIndx]); 5900 5901 MIB = BuildMI(newMBB, TII->get(X86::CMP32rr)); 5902 MIB.addReg(t1); 5903 MIB.addReg(t2); 5904 5905 // Generate the cmov 5906 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 5907 MIB = BuildMI(newMBB, TII->get(cmovOpc),t3); 5908 MIB.addReg(t2); 5909 MIB.addReg(t1); 5910 5911 // Compare and exchange if no one has modified the memory location 5912 MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG32)); 5913 for (int i=0; i <= lastAddrIndx; ++i) 5914 (*MIB).addOperand(*argOpers[i]); 5915 MIB.addReg(t3); 5916 5917 MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), destOper.getReg()); 5918 MIB.addReg(X86::EAX); 5919 5920 // insert branch 5921 BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB); 5922 5923 delete mInstr; // The pseudo instruction is gone now.
5924 return nextMBB; 5925} 5926 5927 5928MachineBasicBlock * 5929X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5930 MachineBasicBlock *BB) { 5931 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5932 switch (MI->getOpcode()) { 5933 default: assert(false && "Unexpected instr type to insert"); 5934 case X86::CMOV_FR32: 5935 case X86::CMOV_FR64: 5936 case X86::CMOV_V4F32: 5937 case X86::CMOV_V2F64: 5938 case X86::CMOV_V2I64: { 5939 // To "insert" a SELECT_CC instruction, we actually have to insert the 5940 // diamond control-flow pattern. The incoming instruction knows the 5941 // destination vreg to set, the condition code register to branch on, the 5942 // true/false values to select between, and a branch opcode to use. 5943 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5944 ilist<MachineBasicBlock>::iterator It = BB; 5945 ++It; 5946 5947 // thisMBB: 5948 // ... 5949 // TrueVal = ... 5950 // cmpTY ccX, r1, r2 5951 // bCC copy1MBB 5952 // fallthrough --> copy0MBB 5953 MachineBasicBlock *thisMBB = BB; 5954 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5955 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5956 unsigned Opc = 5957 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5958 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5959 MachineFunction *F = BB->getParent(); 5960 F->getBasicBlockList().insert(It, copy0MBB); 5961 F->getBasicBlockList().insert(It, sinkMBB); 5962 // Update machine-CFG edges by transferring all successors of the current 5963 // block to the new block which will contain the Phi node for the select. 5964 sinkMBB->transferSuccessors(BB); 5965 5966 // Add the true and fallthrough blocks as its successors. 5967 BB->addSuccessor(copy0MBB); 5968 BB->addSuccessor(sinkMBB); 5969 5970 // copy0MBB: 5971 // %FalseValue = ... 5972 // # fallthrough to sinkMBB 5973 BB = copy0MBB; 5974 5975 // Update machine-CFG edges 5976 BB->addSuccessor(sinkMBB); 5977 5978 // sinkMBB: 5979 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5980 // ... 5981 BB = sinkMBB; 5982 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5983 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5984 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5985 5986 delete MI; // The pseudo instruction is gone now. 5987 return BB; 5988 } 5989 5990 case X86::FP32_TO_INT16_IN_MEM: 5991 case X86::FP32_TO_INT32_IN_MEM: 5992 case X86::FP32_TO_INT64_IN_MEM: 5993 case X86::FP64_TO_INT16_IN_MEM: 5994 case X86::FP64_TO_INT32_IN_MEM: 5995 case X86::FP64_TO_INT64_IN_MEM: 5996 case X86::FP80_TO_INT16_IN_MEM: 5997 case X86::FP80_TO_INT32_IN_MEM: 5998 case X86::FP80_TO_INT64_IN_MEM: { 5999 // Change the floating point control register to use "round towards zero" 6000 // mode when truncating to an integer value. 6001 MachineFunction *F = BB->getParent(); 6002 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 6003 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 6004 6005 // Load the old value of the high byte of the control word... 6006 unsigned OldCW = 6007 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 6008 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 6009 6010 // Set the high part to be round to zero... 6011 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 6012 .addImm(0xC7F); 6013 6014 // Reload the modified control word now... 
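      // The stored word 0xC7F sets the rounding-control bits (11:10) to
      // 11b, i.e. truncate toward zero, with all FP exceptions masked
      // (bits 0-5 set); the fldcw below makes it take effect for the
      // fistp issued afterwards.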

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    // Note: copy the operand rather than binding a reference, so the
    // reassignments below don't clobber MI's operands.
    MachineOperand Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    delete MI; // The pseudo instruction is gone now.
    return BB;
  }
  case X86::ATOMAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri);
  case X86::ATOMOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
                                               X86::OR32ri);
  case X86::ATOMXOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
                                               X86::XOR32ri);
  case X86::ATOMMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
  case X86::ATOMMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
  case X86::ATOMUMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
  case X86::ATOMUMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
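  // X86ISD::SETCC materializes a condition code as a value that is always 0
  // or 1, so every bit above the low bit is known to be zero; that is what
  // the SETCC case below records.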
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0)
      : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(),
                               DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}
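
// For example (illustrative DAG shapes): given
//   (X86ISD::Wrapper (targetglobaladdress @G))
// this sets GA = @G, and given
//   (add (X86ISD::Wrapper @G), (constant 8))
// the recursive ADD case above additionally accumulates Offset += 8.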

/// isConsecutiveLoad - Returns true if N is loading from an address of
/// Base + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV = NULL;
  int64_t Offset = 0;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  // DAG combine handles the stack object case.
  return false;
}


/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = PermMask.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF) {
      if (!Base)
        return SDOperand();
      continue;
    }

    unsigned Idx = cast<ConstantSDNode>(Elt)->getValue();
    SDOperand Arg = getShuffleScalarElt(N, Idx, DAG);
    if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
      return SDOperand();
    if (!Base) {
      Base = Arg.Val;
      continue;
    }

    if (!isConsecutiveLoad(Arg.Val, Base, i, MVT::getSizeInBits(EVT)/8, MFI))
      return SDOperand();
  }

  LoadSDNode *LD = cast<LoadSDNode>(Base);
  if (isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget))
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                     LD->getSrcValueOffset(), LD->isVolatile(),
                     LD->getAlignment());
}
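
// Illustrative example of the pattern PerformShuffleCombine rewrites: with
// elements loaded from &A[0]..&A[3] (consecutive, equal size, in order),
//   (vector_shuffle (build_vector (load &A[0]), ..., (load &A[3])),
//                   undef, <0, 1, 2, 3>)
// becomes a single v4f32 load of A, naturally aligned if the base is known
// to be 16-byte aligned and with the first element's alignment otherwise.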

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE2 support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}
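
// For instance,
//   (select (setolt f32 %x, %y), %x, %y)
// becomes (X86ISD::FMIN %x, %y), which isel matches to a single minss.
// Predicates that do not line up exactly with minss/maxss semantics (the
// groups above that test UnsafeFPMath) are only combined under
// -enable-unsafe-fp-math.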

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids
  // clobbering the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
  if (MVT::isVector(St->getValue().getValueType()) &&
      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode *LdVal = St->getValue().Val;
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDOperand, 8> Ops;
    SDNode *ChainVal = St->getChain().Val;
    // Must be a store of a load.  We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).Val == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewChain);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(), St->getSrcValueOffset()+4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}
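
// These FOR/FXOR nodes typically arise from lowering fcopysign/fneg as
// bitwise operations on FP values.  Dropping a +0.0 operand is safe because
// +0.0 is the all-zero bit pattern, so (x | +0.0) == (x ^ +0.0) == x
// bit-for-bit; note that isPosZero() deliberately excludes -0.0, whose set
// sign bit would change the result.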

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}


SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR: return PerformFORCombine(N, DAG);
  case X86ISD::FAND: return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'f':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'y':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(MVT::ValueType ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) const {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
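  // GCC-style reference: 'I' accepts a constant in [0,31] (e.g. a shift
  // count) and 'N' a constant in [0,255] (e.g. an in/out port number), so
  //   asm("shll %1, %0" : "+r"(x) : "I"(5));
  //   asm volatile("outb %0, %1" : : "a"(v), "Nd"(0x80));
  // both lower their constants through the cases above.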
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA), (GA+C), or (C+GA).
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the operands the other way around.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}
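
// Usage sketch (illustrative): the 'A' constraint ties a 64-bit value to the
// EDX:EAX pair in 32-bit mode, e.g.
//   unsigned long long t;
//   asm volatile("rdtsc" : "=A"(t));
// which is why it maps to the {EAX, EDX} list above.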

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }
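
  // For reference: GCC writes the top-of-stack register as plain "st" (the
  // "t" constraint), e.g.
  //   double y;
  //   asm("fsqrt" : "=t"(y) : "0"(x));
  // so an explicit "{st}" constraint resolves to ST0 above.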

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}
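
// Illustrative example: LLVM IR inline asm such as
//   %r = call i32 asm "bswapl $0", "={ax},{ax}"(i32 %x)
// reaches this function with the 16-bit name "{ax}" but an i32 value type;
// the remapping above upgrades the match from AX/GR16 to EAX/GR32.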