X86ISelLowering.cpp revision 15cbde3cf6542ec9c120f59d5d8f3586f5f332c6
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8,  X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8 , Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
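  // Widening is safe here because the value, zero-extended into the wider
  // integer type, is always non-negative, so a signed conversion produces the
  // same result. For example, an i16 source value of 0xFFFF is zero-extended
  // to the i32 value 65535, and sint_to_fp of that gives the expected 65535.0.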
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
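  // For example, given IR along the lines of
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both operations legalize to ISD::SDIVREM nodes with identical operands;
  // CSE merges them, so one idiv supplies the quotient and the remainder.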
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
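  // In effect, something like a select on i1 operands is widened to a select
  // on a larger integer type that the cmov-based lowering below can handle.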
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS      , MVT::i64  , Custom);
  }
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH       , MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER     , MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LCS       , MVT::i8   , Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i16  , Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i32  , Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i64  , Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
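    // In this mixed mode, f32 lives in XMM registers while f64 stays on the
    // x87 stack, so f32 <-> f64 conversions have to bounce through memory
    // (see the setConvertAction calls below).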
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN         , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS         , MVT::f64  , Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN         , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS         , MVT::f64  , Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
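  // Only +/-0.0 and +/-1.0 can be materialized without a load (FLD0/FLD1,
  // plus FCHS for the negative variants), so those are the only f80
  // immediates registered as legal below, after converting the APFloat
  // values to the 80-bit x87DoubleExtended format.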
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
    setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD,                MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD,                MVT::v4i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v2i32, Legal);
    setOperationAction(ISD::ADD,                MVT::v1i64, Legal);

    setOperationAction(ISD::SUB,                MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB,                MVT::v4i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v2i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS,              MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,                MVT::v4i16, Legal);

    setOperationAction(ISD::AND,                MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND,                MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND,                MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND,                MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND,                MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND,                MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND,                MVT::v1i64, Legal);

    setOperationAction(ISD::OR,                 MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,                 MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,                 MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,                 MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,                 MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,                 MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,                 MVT::v1i64, Legal);

    setOperationAction(ISD::XOR,                MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR,                MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR,                MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR,                MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR,                MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR,                MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR,                MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD,               MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD,               MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD,               MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD,               MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD,               MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD,               MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD,               MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR,       MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,                MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
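    // Bitwise operations and full-width loads don't care about the element
    // split: a single 128-bit PAND/POR/PXOR or MOVDQA covers every lane, so
    // one v2i64 pattern serves all of the integer vector types.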
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}


MVT::ValueType
X86TargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
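/// The walk is recursive: a 128-bit vector anywhere inside the aggregate
/// forces 16-byte alignment. For example, struct { int a; <4 x float> v; }
/// yields 16, while struct { int a; double d; } keeps the 4-byte default
/// used by getByValTypeAlignment on 32-bit targets.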
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
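  // If a TAILCALL node is chained in front of this RET (possibly behind a
  // TokenFactor), fold the pair into a single TC_RETURN node carrying the
  // target address and the stack adjustment instead of emitting a normal
  // return sequence.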
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand,8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. The last operand is a flag, so it is
    // not copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  SmallVector<SDOperand, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDOperand ValToCopy = Op.getOperand(i*2+1);

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (RVLocs[i].getLocReg() == X86::ST0 ||
        RVLocs[i].getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.Val)
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size());
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
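  // Chain and InFlag are threaded through each CopyFromReg below so the
  // copies stay glued to the call in the right order.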
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    MVT::ValueType CopyVT = RVLocs[i].getValVT();

    // If this is a call to a function that returns an fp value on the floating
    // point stack, but where we prefer to use the value in xmm registers, copy
    // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if (RVLocs[i].getLocReg() == X86::ST0 &&
        isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
      CopyVT = MVT::f80;
    }

    Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDOperand Val = Chain.getValue(0);
    InFlag = Chain.getValue(2);

    if (CopyVT != RVLocs[i].getValVT()) {
      // Round the f80 to the right size, which also moves it to the
      // appropriate xmm register.
      Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));
    }

    ResultVals.push_back(Val);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_ST0_ST1, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention seems to be standard for many Windows API
//  routines. It differs from the C calling convention just a little: the
//  callee cleans up the stack, not the caller.
//  Symbols should also be decorated in some fancy way :) It doesn't support
//  any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation in LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
/// return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call.
/// Currently the implementation of this call is very conservative and assumes
/// all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with virtual
/// registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
                                                    MachineFrameInfo *MFI) {
  RegisterSDNode *OpReg = NULL;
  FrameIndexSDNode *FrameIdxNode = NULL;
  int FrameIdx = 0;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       (FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op.getOperand(1))) &&
       (MFI->isFixedObjectIndex((FrameIdx = FrameIdxNode->getIndex()))) &&
       (MFI->getObjectOffset(FrameIdx) >= 0)))
    return true;
  return false;
}

/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}


/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CopyTailCallClobberedArgumentsToVRegs - Create virtual registers for all
/// arguments to force loading and guarantee that arguments sourcing from
/// incoming parameters do not overwrite each other.
static SDOperand
CopyTailCallClobberedArgumentsToVRegs(SDOperand Chain,
        SmallVector<std::pair<unsigned, SDOperand>, 8> &TailCallClobberedVRegs,
                                      SelectionDAG &DAG,
                                      MachineFunction &MF,
                                      const TargetLowering *TL) {

  SDOperand InFlag;
  for (unsigned i = 0, e = TailCallClobberedVRegs.size(); i != e; i++) {
    SDOperand Arg = TailCallClobberedVRegs[i].second;
    unsigned Idx = TailCallClobberedVRegs[i].first;
    unsigned VReg =
      MF.getRegInfo().
      createVirtualRegister(TL->getRegClassFor(Arg.getValueType()));
    Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
    InFlag = Chain.getValue(1);
    Arg = DAG.getCopyFromReg(Chain, VReg, Arg.getValueType(), InFlag);
    TailCallClobberedVRegs[i] = std::make_pair(Idx, Arg);
    Chain = Arg.getValue(1);
    InFlag = Arg.getValue(2);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" with size and alignment information specified by
/// the specific parameter attribute. The copy will be passed as a byval
/// function parameter.
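/// The alignment field of the flags word holds a shift count rather than a
/// byte count: the decode below computes One << field, so e.g. a field value
/// of 3 denotes an 8-byte-aligned byval argument.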
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ParamFlags::ParamFlagsTy Flags,
                          SelectionDAG &DAG) {
  unsigned Align = ISD::ParamFlags::One <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ParamFlags::ParamFlagsTy Flags =
    cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isByVal = Flags & ISD::ParamFlags::ByVal;
  bool isImmutable = !AlwaysUseMutable && !isByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization, mark all
  // arguments mutable, since they could be overwritten by the lowering of the
  // arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
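      // The register save area created above is laid out as 6 * 8 = 48 bytes
      // of GPR spill slots followed by 8 * 16 = 128 bytes of XMM spill slots,
      // which is why VarArgsGPOffset is NumIntRegs * 8 and VarArgsFPOffset
      // starts at byte 48.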
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the argument area takes 8n+4 bytes so that the arguments are
  // aligned once the return address has been pushed.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn  = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;    // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA;  // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  ISD::ParamFlags::ParamFlagsTy Flags =
    cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

/// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64 struct
/// return call to the specified function.  The x86-64 ABI specifies that some
/// SRet calls are actually returned in registers.  Since current LLVM cannot
/// represent multi-value calls, they are represented as calls where the
/// results are passed in a hidden struct provided by the caller.  This
/// function examines the type of the struct to determine the correct way to
/// implement the call.
X86::X86_64SRet
X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
  // FIXME: Disabled for now.
  return X86::InMemory;

  const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType());
  const Type *RTy = PTy->getElementType();
  unsigned Size = getTargetData()->getABITypeSize(RTy);
  if (Size != 16 && Size != 32)
    return X86::InMemory;

  if (Size == 32) {
    const StructType *STy = dyn_cast<StructType>(RTy);
    if (!STy) return X86::InMemory;
    if (STy->getNumElements() == 2 &&
        STy->getElementType(0) == Type::X86_FP80Ty &&
        STy->getElementType(1) == Type::X86_FP80Ty)
      return X86::InX87;
  }

  bool AllFP = true;
  for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end();
       I != E; ++I) {
    const Type *STy = I->get();
    if (!STy->isFPOrFPVector()) {
      AllFP = false;
      break;
    }
  }

  if (AllFP)
    return X86::InSSE;
  return X86::InGPR64;
}

void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall,
                                                      CCAssignFn *Fn,
                                                      CCState &CCInfo) {
  unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
  for (unsigned i = 1; i != NumOps; ++i) {
    MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType();
    SDOperand FlagOp = TheCall->getOperand(5+2*i+1);
    unsigned ArgFlags = cast<ConstantSDNode>(FlagOp)->getValue();
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) {
      cerr << "Call operand #" << i << " has unhandled type "
           << MVT::getValueTypeString(ArgVT) << "\n";
      abort();
    }
  }
}
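// Illustrative examples, not from the original source: with the FIXME in
// ClassifyX86_64SRetCallReturn removed, an sret pointee of
// {x86_fp80, x86_fp80} (32 bytes under the ABI) would classify as
// X86::InX87, a 16-byte all-FP struct such as {double, double} as
// X86::InSSE, a 16-byte struct containing any integer member such as
// {i64, i64} as X86::InGPR64, and everything else as X86::InMemory.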
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Chain   = Op.getOperand(0);
  unsigned CC       = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg     = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool IsTailCall   = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
                      && CC == CallingConv::Fast && PerformTailCallOpt;
  SDOperand Callee  = Op.getOperand(4);
  bool Is64Bit      = Subtarget->is64Bit();
  bool IsStructRet  = CallIsStructReturn(Op);

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCAssignFn *CCFn = CCAssignFnForNode(Op);

  X86::X86_64SRet SRetMethod = X86::InMemory;
  if (Is64Bit && IsStructRet)
    // FIXME: We can't figure out the type of the sret structure for indirect
    // calls.  We need to copy more information from CallSite to the ISD::CALL
    // node.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      SRetMethod =
        ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal()));

  // UGLY HACK! For x86-64, some 128-bit aggregates are returned in a pair of
  // registers.  Unfortunately, llvm does not support i128 yet, so we pretend
  // it's an sret call.
  if (SRetMethod != X86::InMemory)
    X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CCFn);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  // Make sure the stack size is 8n+4 bytes, so that both the start of the
  // arguments and the stack after the return address has been pushed stay
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (NumBytes & 7) == 0)
    NumBytes += 4;

  int FPDiff = 0;
  if (IsTailCall) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed =
      MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot, but only if
    // this delta is greater than the previously recorded delta.
    if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
      MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));

  SDOperand RetAddrFrIdx;
  if (IsTailCall) {
    // Adjust the return address stack slot.
    if (FPDiff) {
      MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
      RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
      // Load the "old" return address.
      RetAddrFrIdx = DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
      Chain = SDOperand(RetAddrFrIdx.Val, 1);
    }
  }

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<std::pair<unsigned, SDOperand>, 8> TailCallClobberedVRegs;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.  For tail
  // calls, remember all arguments for later special lowering.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      if (!IsTailCall) {
        assert(VA.isMemLoc());
        if (StackPtr.Val == 0)
          StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());

        MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                               Arg));
      } else if (IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
        TailCallClobberedVRegs.push_back(std::make_pair(i, Arg));
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls via the PLT.
  if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  // If we are tail calling and generating PIC/GOT style code, load the
  // address of the callee into ECX.  The value in ECX is used as the target
  // of the tail jump.  This is done to circumvent the ebx/callee-saved
  // problem for tail calls on PIC/GOT architectures.  Normally we would just
  // put the address of the GOT into ebx and then call target@PLT.  But for
  // tail calls ebx would be restored (since ebx is callee saved) before
  // jumping to the target@PLT.
  if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
    // Note: The actual moving to ECX is done further down.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (G && !G->getGlobal()->hasHiddenVisibility() &&
        !G->getGlobal()->hasProtectedVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);
  }

  if (Is64Bit && isVarArg) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...)
    // in the declaration) %al is used as a hidden argument to specify the
    // number of SSE registers used.  The contents of %al do not need to
    // match exactly the number of registers, but must be an upper bound on
    // the number of SSE registers used, and is in the range 0 - 8 inclusive.
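    // Illustrative example, not from the original source: for a call such as
    //   printf("%f\n", x);   // one double, passed in %xmm0
    // NumXMMRegs below is 1, and the copy into AL yields code like
    //   movb $1, %al
    //   call printf
    // where any value in [1, 8] would also satisfy the upper-bound rule.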
    // Count the number of XMM registers allocated.
    static const unsigned XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (IsTailCall) {
    SmallVector<SDOperand, 8> MemOpChains2;
    SDOperand FIN;
    int FI = 0;
    // Do not flag preceding copytoreg stuff together with the following
    // stuff.
    InFlag = SDOperand();

    Chain = CopyTailCallClobberedArgumentsToVRegs(Chain, TailCallClobberedVRegs,
                                                  DAG, MF, this);

    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (!VA.isRegLoc()) {
        assert(VA.isMemLoc());
        SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
        SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
        ISD::ParamFlags::ParamFlagsTy Flags =
          cast<ConstantSDNode>(FlagsOp)->getValue();
        // Create frame index.
        int32_t Offset = VA.getLocMemOffset()+FPDiff;
        uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
        FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
        FIN = DAG.getFrameIndex(FI, MVT::i32);

        // Find the virtual register for this argument.
        bool Found = false;
        for (unsigned idx = 0, e = TailCallClobberedVRegs.size(); idx < e;
             ++idx)
          if (TailCallClobberedVRegs[idx].first == i) {
            Arg = TailCallClobberedVRegs[idx].second;
            Found = true;
            break;
          }
        assert((!IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI) || Found) &&
               "No corresponding Argument was found");

        if (Flags & ISD::ParamFlags::ByVal) {
          // Copy relative to framepointer.
          MemOpChains2.push_back(CreateCopyOfByValArgument(Arg, FIN, Chain,
                                                           Flags, DAG));
        } else {
          // Store relative to framepointer.
          MemOpChains2.push_back(
            DAG.getStore(Chain, Arg, FIN,
                         PseudoSourceValue::getFixedStack(), FI));
        }
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                          &MemOpChains2[0], MemOpChains2.size());

    // Store the return address to the appropriate stack slot.
    if (FPDiff) {
      // Calculate the new stack slot for the return address.
      int SlotSize = Is64Bit ? 8 : 4;
      int NewReturnAddrFI =
        MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
      MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
      SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
      Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
                           PseudoSourceValue::getFixedStack(),
                           NewReturnAddrFI);
    }
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    if ((IsTailCall || !Is64Bit ||
         getTargetMachine().getCodeModel() != CodeModel::Large)
        && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                           getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (IsTailCall || !Is64Bit ||
        getTargetMachine().getCodeModel() != CodeModel::Large)
      Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  } else if (IsTailCall) {
    unsigned Opc = Is64Bit ? X86::R9 : X86::ECX;

    Chain = DAG.getCopyToReg(Chain,
                             DAG.getRegister(Opc, getPointerTy()),
                             Callee, InFlag);
    Callee = DAG.getRegister(Opc, getPointerTy());
    // Add the register as a live out.
    DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;

  if (IsTailCall) {
    Ops.push_back(Chain);
    Ops.push_back(DAG.getIntPtrConstant(NumBytes));
    Ops.push_back(DAG.getIntPtrConstant(0));
    if (InFlag.Val)
      Ops.push_back(InFlag);
    Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
    InFlag = Chain.getValue(1);

    // Returns a chain & a flag for retval copy to use.
    NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));

  // Add argument registers to the end of the list so that they are known
  // live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (!IsTailCall && !Is64Bit &&
      getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  // Add an implicit use of AL for x86 vararg functions.
  if (Is64Bit && isVarArg)
    Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  if (IsTailCall) {
    assert(InFlag.Val &&
           "Flag must be set.  Depend on flag being set in LowerRET");
    Chain = DAG.getNode(X86ISD::TAILCALL,
                        Op.Val->getVTList(), &Ops[0], Ops.size());

    return SDOperand(Chain.Val, Op.ResNo);
  }

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush;
  if (IsCalleePop(Op))
    NumBytesForCalleeToPush = NumBytes;  // Callee pops everything.
  else if (!Is64Bit && IsStructRet)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(NumBytes),
                             DAG.getIntPtrConstant(NumBytesForCalleeToPush),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  switch (SRetMethod) {
  default:
    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG),
                     Op.ResNo);
  case X86::InGPR64:
    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
                                                   X86::RAX, X86::RDX,
                                                   MVT::i64, DAG), Op.ResNo);
  case X86::InSSE:
    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
                                                   X86::XMM0, X86::XMM1,
                                                   MVT::f64, DAG), Op.ResNo);
  case X86::InX87:
    return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
                     Op.ResNo);
  }
}
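// Illustrative note, not from the original source: on x86-32 an sret callee
// removes the hidden pointer itself, returning with "ret $4" instead of a
// plain "ret".  The CALLSEQ_END above therefore reports those 4 bytes as
// callee-popped, so the caller does not release them a second time.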
//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like the standard (callee cleans arguments) calling convention, except
//  that ECX is reserved for storing the address of the tail-called function.
//  Only 2 registers are free for argument passing (inreg).  Tail call
//  optimization is performed provided:
//    * tailcallopt is enabled
//    * caller/callee are fastcc
//  On the X86_64 architecture with GOT-style position independent code, only
//  local (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI, the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment.  (Dynamic linkers need this - Darwin's
//  dyld, for example.)
//  If a tail-called function (callee) has more arguments than the caller,
//  the caller needs to make sure that there is room to move the RETADDR to.
//  This is achieved by reserving an area the size of the argument delta
//  right after the original RETADDR, but before the saved framepointer or
//  the spilled registers, e.g. caller(arg1, arg2) calls
//  callee(arg1, arg2, arg3, arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Round up the stack size so that, once the
/// return address slot is accounted for, the stack stays aligned; e.g. for a
/// 16 byte alignment requirement the result is of the form 16n + 12.
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // The low bits are at most the target remainder (e.g. 12); just add
      // the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the low bits, then add one full stack alignment plus the
      // target remainder (e.g. 16 + 12 bytes).
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment - SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}
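// Worked example, not from the original source: on x86-32 (SlotSize == 4)
// with 16 byte stack alignment, AlignMask is 15 and the target remainder is
// 12.  StackSize == 20 gives (20 & 15) == 4 <= 12, so Offset becomes
// 20 + (12 - 4) == 28; StackSize == 30 gives (30 & 15) == 14 > 12, so Offset
// becomes (30 & ~15) + 16 + 12 == 44.  Both results are congruent to 12
// mod 16, leaving the stack 16 byte aligned after the 4 byte return address
// is pushed.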
/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return.  A function is eligible if
/// caller/callee calling conventions match (currently only fastcc supports
/// tail calls) and the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // Tail calls are always supported on x86/32-bit, and on x86-64 when
      // not using GOT-style PIC.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
        return true;

      // We can only do local tail calls (in the same module, hidden or
      // protected) on x86_64 PIC/GOT at the moment.
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        return G->getGlobal()->hasHiddenVisibility()
            || G->getGlobal()->hasProtectedVisibility();
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}

/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code.  It returns false if it cannot do a direct
/// translation.  X86CC is the translated CondCode.  LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}
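// Illustrative example, not from the original source: per the flag table
// above, an ordered "x < y" (ISD::SETOLT) has no direct condition code
// because CF == 1 both for "less than" and for "unordered".
// translateX86CC therefore swaps the operands and uses X86::COND_A, so the
// comparison can be emitted roughly as:
//   ucomiss %xmm_x, %xmm_y   ; compare y against x
//   seta    %al              ; CF == 0 && ZF == 0: ordered and y > x
// (register names here are placeholders).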
/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code?  The current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value falls within the specified range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value is equal to the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
    return false;

  // Check that the mask doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
      return false;
  }

  return true;
}

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // The lower quadword must be copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // The upper quadword may be shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // The upper quadword must be copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // The lower quadword may be shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
      return false;

  return true;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Elems[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}
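// Illustrative examples, not from the original source: for v4f32/v4i32, a
// mask of <2, 3, 0, 1> references only the first vector (all indices < 4),
// so isPSHUFDMask returns true; a mask of <0, 2, 4, 6> takes its low half
// from V1 and its high half from V2 (indices 4..7), matching the shufps
// pattern that isSHUFPMask accepts.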
/// isCommutedSHUFP - Returns true if the shuffle mask is exactly the reverse
/// of what x86 shuffles want.  x86 shuffles require the lower half elements
/// to come from vector 1 (which would equal the dest.) and the upper half to
/// come from vector 2.
static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
  if (NumOps != 2 && NumOps != 4) return false;

  unsigned Half = NumOps / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
      return false;
  for (unsigned i = Half; i < NumOps; ++i)
    if (!isUndefOrInRange(Ops[i], 0, NumOps))
      return false;
  return true;
}

static bool isCommutedSHUFP(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect elt0 == 6, elt1 == 7, elt2 == 2, elt3 == 3.
  return isUndefOrEqual(N->getOperand(0), 6) &&
         isUndefOrEqual(N->getOperand(1), 7) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for the canonical
/// form of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect elt0 == 2, elt1 == 3, elt2 == 2, elt3 == 3.
  return isUndefOrEqual(N->getOperand(0), 2) &&
         isUndefOrEqual(N->getOperand(1), 3) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  return true;
}

/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i) {
    SDOperand Arg = N->getOperand(i + NumElems/2);
    if (!isUndefOrEqual(Arg, i + NumElems))
      return false;
  }

  return true;
}
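// Illustrative note, not from the original source: these masks mirror the
// instruction write patterns.  For four elements, isMOVLPMask's
// <4, 5, 2, 3> replaces the low half with the second vector and keeps the
// high half of the first, which is what movlp{s|d} does to its destination;
// isMOVHPMask's <0, 1, 4, 5> is the converse, keeping the low half and
// replacing the high one.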
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI  = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI  = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j + NumElts/2))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for the canonical
/// form of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for the canonical
/// form of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i + 1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}
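// Illustrative example, not from the original source: unpcklps on
// A = <a0, a1, a2, a3> and B = <b0, b1, b2, b3> produces <a0, b0, a1, b1>,
// i.e. shuffle mask <0, 4, 1, 5>, which isUNPCKLMask accepts; unpckhps
// interleaves the high halves, producing <a2, b2, a3, b3> with mask
// <2, 6, 3, 7> for isUNPCKHMask.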
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants: the lowest element of the result must be the
/// lowest element of vector 2, and the other elements must come from
/// vector 1 in order.
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3.
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}
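// Illustrative note, not from the original source: the SSE3 instructions
// matched by this predicate and the next one duplicate alternating source
// elements.  movshdup on <a0, a1, a2, a3> produces <a1, a1, a3, a3>
// (mask <1, 1, 3, 3>), and movsldup produces <a0, a0, a2, a2>
// (mask <0, 0, 2, 2>).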
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2.
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same,
  // and if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element #0.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUF* and
/// SHUFP* instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}
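// Worked example, not from the original source: for the v4i32 mask
// <3, 1, 0, 2> the loop above visits the operands in reverse order and
// packs two bits per element, producing 0x87 (binary 10 00 01 11): bits
// [1:0] select source element 3 for destination element 0, bits [3:2]
// select element 1 for destination element 1, and so on, matching the
// PSHUFD/SHUFPS immediate encoding.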
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
/// instruction.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
/// instruction.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - Return true if the specified VECTOR_SHUFFLE
/// operand specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // The lower quadword is shuffled within itself.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // The upper quadword is shuffled within itself.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}
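// Worked example, not from the original source: for the v8i16 mask
// <0, 1, 2, 3, 7, 6, 5, 4>, getShufflePSHUFHWImmediate encodes only the
// upper quadword, biasing each index by -4, and yields 0x1B (binary
// 00 01 10 11): bits [1:0] select element 7 for destination element 4, bits
// [3:2] select element 6 for destination element 5, and so on.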
/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the values
/// in their permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// CommuteVectorShuffleMask - Change the values in a shuffle permute mask
/// assuming the two vector operands have swapped position.
static
SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
}
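// Illustrative example, not from the original source: commuting
// shuffle(V1, V2, <0, 1, 4, 5>) with either helper above yields
// shuffle(V2, V1, <4, 5, 0, 1>): indices below NumElems move up by NumElems
// and vice versa, so the same elements are selected from the swapped
// operands.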
/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps.  The lower half elements should come from the upper half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}
/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed
/// to match movlp{s|d}.  The lower half elements should come from the lower
/// half of V1 (and in order), and the upper half elements should come from
/// the upper half of V2 (and in order).  And since V1 will become the source
/// of the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation.  We will try to
  // use load folding with the shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements
/// are all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be
/// resolved to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be
/// resolved to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.Val->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}
2803/// 2804static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2805 assert(MVT::isVector(VT) && "Expected a vector type"); 2806 2807 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2808 // type. This ensures they get CSE'd. 2809 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2810 SDOperand Vec; 2811 if (MVT::getSizeInBits(VT) == 64) // MMX 2812 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2813 else // SSE 2814 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2815 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2816} 2817 2818/// getOnesVector - Returns a vector of specified type with all bits set. 2819/// 2820static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2821 assert(MVT::isVector(VT) && "Expected a vector type"); 2822 2823 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2824 // type. This ensures they get CSE'd. 2825 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2826 SDOperand Vec; 2827 if (MVT::getSizeInBits(VT) == 64) // MMX 2828 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2829 else // SSE 2830 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2831 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2832} 2833 2834 2835/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2836/// that point to V2 points to its first element. 2837static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2838 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2839 2840 bool Changed = false; 2841 SmallVector<SDOperand, 8> MaskVec; 2842 unsigned NumElems = Mask.getNumOperands(); 2843 for (unsigned i = 0; i != NumElems; ++i) { 2844 SDOperand Arg = Mask.getOperand(i); 2845 if (Arg.getOpcode() != ISD::UNDEF) { 2846 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2847 if (Val > NumElems) { 2848 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2849 Changed = true; 2850 } 2851 } 2852 MaskVec.push_back(Arg); 2853 } 2854 2855 if (Changed) 2856 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2857 &MaskVec[0], MaskVec.size()); 2858 return Mask; 2859} 2860 2861/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2862/// operation of specified width. 2863static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2864 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2865 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2866 2867 SmallVector<SDOperand, 8> MaskVec; 2868 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2869 for (unsigned i = 1; i != NumElems; ++i) 2870 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2871 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2872} 2873 2874/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2875/// of specified width. 
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of the specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of the specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  unsigned Half = NumElems/2;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that
/// swaps element #0 of a vector with the specified index, leaving the rest
/// of the elements in place.
static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
                                    SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  // Element #0 of the result gets the elt we are replacing.
  MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
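// Illustrative example, not from the original source:
// getSwapEltZeroMask(4, 2, DAG) builds <2, 1, 0, 3>, which exchanges
// elements 0 and 2 and leaves elements 1 and 3 where they are.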
getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2944  unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); 2945  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2946  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2947  SmallVector<SDOperand, 16> MaskVec; 2948  for (unsigned i = 0; i != NumElems; ++i) 2949    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here. 2950      MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2951    else 2952      MaskVec.push_back(DAG.getConstant(i, EVT)); 2953  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2954                               &MaskVec[0], MaskVec.size()); 2955  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2956} 2957 2958/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2959/// 2960static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2961                                       unsigned NumNonZero, unsigned NumZero, 2962                                       SelectionDAG &DAG, TargetLowering &TLI) { 2963  if (NumNonZero > 8) 2964    return SDOperand(); 2965 2966  SDOperand V(0, 0); 2967  bool First = true; 2968  for (unsigned i = 0; i < 16; ++i) { 2969    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2970    if (ThisIsNonZero && First) { 2971      if (NumZero) 2972        V = getZeroVector(MVT::v8i16, DAG); 2973      else 2974        V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2975      First = false; 2976    } 2977 2978    if ((i & 1) != 0) { 2979      SDOperand ThisElt(0, 0), LastElt(0, 0); 2980      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2981      if (LastIsNonZero) { 2982        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2983      } 2984      if (ThisIsNonZero) { 2985        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2986        ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2987                              ThisElt, DAG.getConstant(8, MVT::i8)); 2988        if (LastIsNonZero) 2989          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2990      } else 2991        ThisElt = LastElt; 2992 2993      if (ThisElt.Val) 2994        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2995                        DAG.getIntPtrConstant(i/2)); 2996    } 2997  } 2998 2999  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 3000} 3001 3002/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 3003/// 3004static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 3005                                       unsigned NumNonZero, unsigned NumZero, 3006                                       SelectionDAG &DAG, TargetLowering &TLI) { 3007  if (NumNonZero > 4) 3008    return SDOperand(); 3009 3010  SDOperand V(0, 0); 3011  bool First = true; 3012  for (unsigned i = 0; i < 8; ++i) { 3013    bool isNonZero = (NonZeros & (1 << i)) != 0; 3014    if (isNonZero) { 3015      if (First) { 3016        if (NumZero) 3017          V = getZeroVector(MVT::v8i16, DAG); 3018        else 3019          V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3020        First = false; 3021      } 3022      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 3023                      DAG.getIntPtrConstant(i)); 3024    } 3025  } 3026 3027  return V; 3028} 3029 3030SDOperand 3031X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3032  // All zeros are handled with pxor; all ones are handled with pcmpeqd. 3033  if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 3034    // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 3035    // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 3036    // eliminated on x86-32 hosts.
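    // A sketch of the effect: an all-ones v8i16 and an all-ones v16i8 both
    // lower through getOnesVector to a BIT_CONVERT of the same v4i32
    // (pcmpeqd) node, so CSE collapses them to a single constant.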
3037    if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 3038      return Op; 3039 3040    if (ISD::isBuildVectorAllOnes(Op.Val)) 3041      return getOnesVector(Op.getValueType(), DAG); 3042    return getZeroVector(Op.getValueType(), DAG); 3043  } 3044 3045  MVT::ValueType VT = Op.getValueType(); 3046  MVT::ValueType EVT = MVT::getVectorElementType(VT); 3047  unsigned EVTBits = MVT::getSizeInBits(EVT); 3048 3049  unsigned NumElems = Op.getNumOperands(); 3050  unsigned NumZero  = 0; 3051  unsigned NumNonZero = 0; 3052  unsigned NonZeros = 0; 3053  bool IsAllConstants = true; 3054  SmallSet<SDOperand, 8> Values; 3055  for (unsigned i = 0; i < NumElems; ++i) { 3056    SDOperand Elt = Op.getOperand(i); 3057    if (Elt.getOpcode() == ISD::UNDEF) 3058      continue; 3059    Values.insert(Elt); 3060    if (Elt.getOpcode() != ISD::Constant && 3061        Elt.getOpcode() != ISD::ConstantFP) 3062      IsAllConstants = false; 3063    if (isZeroNode(Elt)) 3064      NumZero++; 3065    else { 3066      NonZeros |= (1 << i); 3067      NumNonZero++; 3068    } 3069  } 3070 3071  if (NumNonZero == 0) { 3072    // All-undef vector. Return an UNDEF.  All zero vectors were handled above. 3073    return DAG.getNode(ISD::UNDEF, VT); 3074  } 3075 3076  // Special case for a single non-zero, non-undef element. 3077  if (NumNonZero == 1 && NumElems <= 4) { 3078    unsigned Idx = CountTrailingZeros_32(NonZeros); 3079    SDOperand Item = Op.getOperand(Idx); 3080 3081    // If this is an insertion of an i64 value on x86-32, and if the top bits of 3082    // the value are obviously zero, truncate the value to i32 and do the 3083    // insertion that way.  Only do this if the value is non-constant or if the 3084    // value is a constant being inserted into element 0.  It is cheaper to do 3085    // a constant pool load than it is to do a movd + shuffle. 3086    if (EVT == MVT::i64 && !Subtarget->is64Bit() && 3087        (!IsAllConstants || Idx == 0)) { 3088      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 3089        // Handle both MMX and SSE. 3090        MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; 3091        unsigned VecElts = VT == MVT::v2i64 ? 4 : 2; 3092 3093        // Truncate the value (which may itself be a constant) to i32, and 3094        // convert it to a vector with movd (S2V+shuffle to zero extend). 3095        Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); 3096        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); 3097        Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG); 3098 3099        // Now we have our 32-bit value zero extended in the low element of 3100        // a vector.  If Idx != 0, swizzle it into place. 3101        if (Idx != 0) { 3102          SDOperand Ops[] = { 3103            Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), 3104            getSwapEltZeroMask(VecElts, Idx, DAG) 3105          }; 3106          Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); 3107        } 3108        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); 3109      } 3110    } 3111 3112    // If we have a constant or non-constant insertion into the low element of 3113    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 3114    // the rest of the elements.  This will be matched as movd/movq/movss/movsd 3115    // depending on what the source datatype is.  Because we can only get here 3116    // when NumElems <= 4, this only needs to handle i32/f32/i64/f64. 3117    if (Idx == 0 && 3118        // Don't do this for i64 values on x86-32. 3119        (EVT != MVT::i64 || Subtarget->is64Bit())) { 3120      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3121      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
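      // For example, a v4f32 build_vector <x, 0, 0, 0> becomes
      //   (vector_shuffle (scalar_to_vector x), (zero vector), <4, 1, 2, 3>)
      // which the instruction selector matches as a single movss.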
3122      return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3123    } 3124 3125    if (IsAllConstants) // Otherwise, it's better to do a constpool load. 3126      return SDOperand(); 3127 3128    // Otherwise, if this is a vector with i32 or f32 elements, and the element 3129    // is a non-constant being inserted into an element other than the low one, 3130    // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka 3131    // movd/movss) to move this into the low element, then shuffle it into 3132    // place. 3133    if (EVTBits == 32) { 3134      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3135 3136      // Turn it into a shuffle of zero and zero-extended scalar to vector. 3137      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3138      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3139      MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3140      SmallVector<SDOperand, 8> MaskVec; 3141      for (unsigned i = 0; i < NumElems; i++) 3142        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3143      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3144                                   &MaskVec[0], MaskVec.size()); 3145      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3146                         DAG.getNode(ISD::UNDEF, VT), Mask); 3147    } 3148  } 3149 3150  // Splat is obviously ok. Let legalizer expand it to a shuffle. 3151  if (Values.size() == 1) 3152    return SDOperand(); 3153 3154  // A vector full of immediates; various special cases are already 3155  // handled, so this is best done with a single constant-pool load. 3156  if (IsAllConstants) 3157    return SDOperand(); 3158 3159  // Let legalizer expand 2-wide build_vectors. 3160  if (EVTBits == 64) 3161    return SDOperand(); 3162 3163  // If element VT is < 32 bits, convert it to inserts into a zero vector. 3164  if (EVTBits == 8 && NumElems == 16) { 3165    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG, 3166                                        *this); 3167    if (V.Val) return V; 3168  } 3169 3170  if (EVTBits == 16 && NumElems == 8) { 3171    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG, 3172                                        *this); 3173    if (V.Val) return V; 3174  } 3175 3176  // If element VT is == 32 bits, turn it into a number of shuffles. 3177  SmallVector<SDOperand, 8> V; 3178  V.resize(NumElems); 3179  if (NumElems == 4 && NumZero > 0) { 3180    for (unsigned i = 0; i < 4; ++i) { 3181      bool isZero = !(NonZeros & (1 << i)); 3182      if (isZero) 3183        V[i] = getZeroVector(VT, DAG); 3184      else 3185        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3186    } 3187 3188    for (unsigned i = 0; i < 2; ++i) { 3189      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3190        default: break; 3191        case 0: 3192          V[i] = V[i*2];  // Must be a zero vector. 3193          break; 3194        case 1: 3195          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3196                             getMOVLMask(NumElems, DAG)); 3197          break; 3198        case 2: 3199          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3200                             getMOVLMask(NumElems, DAG)); 3201          break; 3202        case 3: 3203          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3204                             getUnpacklMask(NumElems, DAG)); 3205          break; 3206      } 3207    } 3208 3209    // Take advantage of the fact that a GR32-to-VR128 scalar_to_vector 3210    // (i.e. movd) clears the upper bits. 3211    // FIXME: we can do the same for the v4f32 case when we know both parts of 3212    // the lower half come from scalar_to_vector (loadf32). We should do 3213    // that in a post-legalizer dag combiner with target-specific hooks.
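    // Illustrative case for the early-out below: for <a, b, 0, 0> with
    // integer a and b (NonZeros == 0x3), V[0] is built as
    // unpckl(movd a, movd b) and V[1] is a zero vector; since movd already
    // zeroed the upper elements, V[0] is <a, b, 0, 0> and is the result.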
3214 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3215 return V[0]; 3216 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3217 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3218 SmallVector<SDOperand, 8> MaskVec; 3219 bool Reverse = (NonZeros & 0x3) == 2; 3220 for (unsigned i = 0; i < 2; ++i) 3221 if (Reverse) 3222 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3223 else 3224 MaskVec.push_back(DAG.getConstant(i, EVT)); 3225 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3226 for (unsigned i = 0; i < 2; ++i) 3227 if (Reverse) 3228 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3229 else 3230 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3231 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3232 &MaskVec[0], MaskVec.size()); 3233 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3234 } 3235 3236 if (Values.size() > 2) { 3237 // Expand into a number of unpckl*. 3238 // e.g. for v4f32 3239 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3240 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3241 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3242 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3243 for (unsigned i = 0; i < NumElems; ++i) 3244 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3245 NumElems >>= 1; 3246 while (NumElems != 0) { 3247 for (unsigned i = 0; i < NumElems; ++i) 3248 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3249 UnpckMask); 3250 NumElems >>= 1; 3251 } 3252 return V[0]; 3253 } 3254 3255 return SDOperand(); 3256} 3257 3258static 3259SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3260 SDOperand PermMask, SelectionDAG &DAG, 3261 TargetLowering &TLI) { 3262 SDOperand NewV; 3263 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3264 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3265 MVT::ValueType PtrVT = TLI.getPointerTy(); 3266 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3267 PermMask.Val->op_end()); 3268 3269 // First record which half of which vector the low elements come from. 3270 SmallVector<unsigned, 4> LowQuad(4); 3271 for (unsigned i = 0; i < 4; ++i) { 3272 SDOperand Elt = MaskElts[i]; 3273 if (Elt.getOpcode() == ISD::UNDEF) 3274 continue; 3275 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3276 int QuadIdx = EltIdx / 4; 3277 ++LowQuad[QuadIdx]; 3278 } 3279 int BestLowQuad = -1; 3280 unsigned MaxQuad = 1; 3281 for (unsigned i = 0; i < 4; ++i) { 3282 if (LowQuad[i] > MaxQuad) { 3283 BestLowQuad = i; 3284 MaxQuad = LowQuad[i]; 3285 } 3286 } 3287 3288 // Record which half of which vector the high elements come from. 3289 SmallVector<unsigned, 4> HighQuad(4); 3290 for (unsigned i = 4; i < 8; ++i) { 3291 SDOperand Elt = MaskElts[i]; 3292 if (Elt.getOpcode() == ISD::UNDEF) 3293 continue; 3294 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3295 int QuadIdx = EltIdx / 4; 3296 ++HighQuad[QuadIdx]; 3297 } 3298 int BestHighQuad = -1; 3299 MaxQuad = 1; 3300 for (unsigned i = 0; i < 4; ++i) { 3301 if (HighQuad[i] > MaxQuad) { 3302 BestHighQuad = i; 3303 MaxQuad = HighQuad[i]; 3304 } 3305 } 3306 3307 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3308 if (BestLowQuad != -1 || BestHighQuad != -1) { 3309 // First sort the 4 chunks in order using shufpd. 
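      // A sketch: viewing the v8i16 as v2i64, each 64-bit half is one quad of
      // four i16 elements. If BestLowQuad == 2 and BestHighQuad == 0, the
      // v2i64 mask built below is <2, 0>: V2's low quad becomes the new low
      // half and V1's low quad becomes the new high half.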
3310    SmallVector<SDOperand, 8> MaskVec; 3311    if (BestLowQuad != -1) 3312      MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3313    else 3314      MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3315    if (BestHighQuad != -1) 3316      MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3317    else 3318      MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3319    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0], 2); 3320    NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3321                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3322                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3323    NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3324 3325    // Now sort high and low parts separately. 3326    BitVector InOrder(8); 3327    if (BestLowQuad != -1) { 3328      // Sort lower half in order using PSHUFLW. 3329      MaskVec.clear(); 3330      bool AnyOutOrder = false; 3331      for (unsigned i = 0; i != 4; ++i) { 3332        SDOperand Elt = MaskElts[i]; 3333        if (Elt.getOpcode() == ISD::UNDEF) { 3334          MaskVec.push_back(Elt); 3335          InOrder.set(i); 3336        } else { 3337          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3338          if (EltIdx != i) 3339            AnyOutOrder = true; 3340          MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3341          // If this element is in the right place after this shuffle, then 3342          // remember it. 3343          if ((int)(EltIdx / 4) == BestLowQuad) 3344            InOrder.set(i); 3345        } 3346      } 3347      if (AnyOutOrder) { 3348        for (unsigned i = 4; i != 8; ++i) 3349          MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3350        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3351        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3352      } 3353    } 3354 3355    if (BestHighQuad != -1) { 3356      // Sort high half in order using PSHUFHW if possible. 3357      MaskVec.clear(); 3358      for (unsigned i = 0; i != 4; ++i) 3359        MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3360      bool AnyOutOrder = false; 3361      for (unsigned i = 4; i != 8; ++i) { 3362        SDOperand Elt = MaskElts[i]; 3363        if (Elt.getOpcode() == ISD::UNDEF) { 3364          MaskVec.push_back(Elt); 3365          InOrder.set(i); 3366        } else { 3367          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3368          if (EltIdx != i) 3369            AnyOutOrder = true; 3370          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3371          // If this element is in the right place after this shuffle, then 3372          // remember it. 3373          if ((int)(EltIdx / 4) == BestHighQuad) 3374            InOrder.set(i); 3375        } 3376      } 3377      if (AnyOutOrder) { 3378        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3379        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3380      } 3381    } 3382 3383    // The other elements are put in the right place using pextrw and pinsrw. 3384    for (unsigned i = 0; i != 8; ++i) { 3385      if (InOrder[i]) 3386        continue; 3387      SDOperand Elt = MaskElts[i]; 3388      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3389      if (EltIdx == i) 3390        continue; 3391      SDOperand ExtOp = (EltIdx < 8) 3392        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3393                      DAG.getConstant(EltIdx, PtrVT)) 3394        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3395                      DAG.getConstant(EltIdx - 8, PtrVT)); 3396      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3397                         DAG.getConstant(i, PtrVT)); 3398    } 3399    return NewV; 3400  } 3401 3402  // PSHUF{H|L}W are not used. Lower into extracts and inserts, but try to use 3403  // as few as possible. 3404  // First, let's find out how many elements are already in the right order.
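  // A sketch of the bookkeeping below: mask slot i counts as "in order" for
  // V1 if it selects V1[i] (mask value i) and for V2 if it selects V2[i]
  // (mask value i+8); undefs count for both. The operand with more elements
  // already in order is kept as the base, so fewer pextrw/pinsrw pairs are
  // needed afterwards.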
3405  unsigned V1InOrder = 0; 3406  unsigned V1FromV1 = 0; 3407  unsigned V2InOrder = 0; 3408  unsigned V2FromV2 = 0; 3409  SmallVector<SDOperand, 8> V1Elts; 3410  SmallVector<SDOperand, 8> V2Elts; // Kept in lockstep with V1Elts so V1Elts[i] / V2Elts[i] always describe mask slot i. 3411  for (unsigned i = 0; i < 8; ++i) { 3412    SDOperand Elt = MaskElts[i]; 3413    if (Elt.getOpcode() == ISD::UNDEF) { 3414      V1Elts.push_back(Elt); 3415      V2Elts.push_back(Elt); 3416      ++V1InOrder; 3417      ++V2InOrder; 3418      continue; 3419    } 3420    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3421    if (EltIdx == i) { 3422      V1Elts.push_back(Elt); 3423      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3424      ++V1InOrder; 3425    } else if (EltIdx == i+8) { 3426      V1Elts.push_back(Elt); 3427      V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3428      ++V2InOrder; 3429    } else if (EltIdx < 8) { 3430      V1Elts.push_back(Elt); V2Elts.push_back(DAG.getConstant(EltIdx+8, MaskEVT)); 3431      ++V1FromV1; 3432    } else { 3433      V1Elts.push_back(Elt); V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3434      ++V2FromV2; 3435    } 3436  } 3437 3438  if (V2InOrder > V1InOrder) { 3439    PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3440    std::swap(V1, V2); 3441    std::swap(V1Elts, V2Elts); 3442    std::swap(V1FromV1, V2FromV2); std::swap(V1InOrder, V2InOrder); // Keep the counters consistent with the commuted operands. 3443  } 3444 3445  if ((V1FromV1 + V1InOrder) != 8) { 3446    // Some elements are from V2. 3447    if (V1FromV1) { 3448      // If there are elements that are from V1 but out of place, 3449      // sort them into place first. 3450      SmallVector<SDOperand, 8> MaskVec; 3451      for (unsigned i = 0; i < 8; ++i) { 3452        SDOperand Elt = V1Elts[i]; 3453        if (Elt.getOpcode() == ISD::UNDEF) { 3454          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3455          continue; 3456        } 3457        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3458        if (EltIdx >= 8) 3459          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3460        else 3461          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3462      } 3463      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3464      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3465    } 3466 3467    NewV = V1; 3468    for (unsigned i = 0; i < 8; ++i) { 3469      SDOperand Elt = V1Elts[i]; 3470      if (Elt.getOpcode() == ISD::UNDEF) 3471        continue; 3472      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3473      if (EltIdx < 8) 3474        continue; 3475      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3476                                    DAG.getConstant(EltIdx - 8, PtrVT)); 3477      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3478                         DAG.getConstant(i, PtrVT)); 3479    } 3480    return NewV; 3481  } else { 3482    // All elements are from V1. 3483    NewV = V1; 3484    for (unsigned i = 0; i < 8; ++i) { 3485      SDOperand Elt = V1Elts[i]; 3486      if (Elt.getOpcode() == ISD::UNDEF) 3487        continue; 3488      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3489      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3490                                    DAG.getConstant(EltIdx, PtrVT)); 3491      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3492                         DAG.getConstant(i, PtrVT)); 3493    } 3494    return NewV; 3495  } 3496} 3497 3498/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3499/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 3500/// done when every pair / quad of shuffle mask elements points to elements in 3501/// the right sequence, e.g. 3502/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3503static 3504SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3505                                   MVT::ValueType VT, 3506                                   SDOperand PermMask, SelectionDAG &DAG, 3507                                   TargetLowering &TLI) { 3508  unsigned NumElems = PermMask.getNumOperands(); 3509  unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3510 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3511 MVT::ValueType NewVT = MaskVT; 3512 switch (VT) { 3513 case MVT::v4f32: NewVT = MVT::v2f64; break; 3514 case MVT::v4i32: NewVT = MVT::v2i64; break; 3515 case MVT::v8i16: NewVT = MVT::v4i32; break; 3516 case MVT::v16i8: NewVT = MVT::v4i32; break; 3517 default: assert(false && "Unexpected!"); 3518 } 3519 3520 if (NewWidth == 2) { 3521 if (MVT::isInteger(VT)) 3522 NewVT = MVT::v2i64; 3523 else 3524 NewVT = MVT::v2f64; 3525 } 3526 unsigned Scale = NumElems / NewWidth; 3527 SmallVector<SDOperand, 8> MaskVec; 3528 for (unsigned i = 0; i < NumElems; i += Scale) { 3529 unsigned StartIdx = ~0U; 3530 for (unsigned j = 0; j < Scale; ++j) { 3531 SDOperand Elt = PermMask.getOperand(i+j); 3532 if (Elt.getOpcode() == ISD::UNDEF) 3533 continue; 3534 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3535 if (StartIdx == ~0U) 3536 StartIdx = EltIdx - (EltIdx % Scale); 3537 if (EltIdx != StartIdx + j) 3538 return SDOperand(); 3539 } 3540 if (StartIdx == ~0U) 3541 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3542 else 3543 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3544 } 3545 3546 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3547 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3548 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3549 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3550 &MaskVec[0], MaskVec.size())); 3551} 3552 3553SDOperand 3554X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3555 SDOperand V1 = Op.getOperand(0); 3556 SDOperand V2 = Op.getOperand(1); 3557 SDOperand PermMask = Op.getOperand(2); 3558 MVT::ValueType VT = Op.getValueType(); 3559 unsigned NumElems = PermMask.getNumOperands(); 3560 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3561 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3562 bool V1IsSplat = false; 3563 bool V2IsSplat = false; 3564 3565 if (isUndefShuffle(Op.Val)) 3566 return DAG.getNode(ISD::UNDEF, VT); 3567 3568 if (isZeroShuffle(Op.Val)) 3569 return getZeroVector(VT, DAG); 3570 3571 if (isIdentityMask(PermMask.Val)) 3572 return V1; 3573 else if (isIdentityMask(PermMask.Val, true)) 3574 return V2; 3575 3576 if (isSplatMask(PermMask.Val)) { 3577 if (NumElems <= 4) return Op; 3578 // Promote it to a v4i32 splat. 3579 return PromoteSplat(Op, DAG); 3580 } 3581 3582 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3583 // do it! 3584 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3585 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3586 if (NewOp.Val) 3587 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3588 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3589 // FIXME: Figure out a cleaner way to do this. 3590 // Try to make use of movq to zero out the top part. 
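    // An illustrative case: a v4i32 shuffle of (X, zero) that keeps X's low
    // two elements in the low half and zeros the high half can be rewritten
    // as a v2i64 shuffle of (X', zero'); in that form it is the (commuted)
    // MOVL pattern, which selects as movq -- move the low 64 bits and zero
    // the rest.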
3591    if (ISD::isBuildVectorAllZeros(V2.Val)) { 3592      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3593      if (NewOp.Val) { 3594        SDOperand NewV1 = NewOp.getOperand(0); 3595        SDOperand NewV2 = NewOp.getOperand(1); 3596        SDOperand NewMask = NewOp.getOperand(2); 3597        if (isCommutedMOVL(NewMask.Val, true, false)) { 3598          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3599          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3600                              NewV1, NewV2, getMOVLMask(2, DAG)); 3601          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3602        } 3603      } 3604    } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3605      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3606      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3607        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3608    } 3609  } 3610 3611  if (X86::isMOVLMask(PermMask.Val)) 3612    return (V1IsUndef) ? V2 : Op; 3613 3614  if (X86::isMOVSHDUPMask(PermMask.Val) || 3615      X86::isMOVSLDUPMask(PermMask.Val) || 3616      X86::isMOVHLPSMask(PermMask.Val) || 3617      X86::isMOVHPMask(PermMask.Val) || 3618      X86::isMOVLPMask(PermMask.Val)) 3619    return Op; 3620 3621  if (ShouldXformToMOVHLPS(PermMask.Val) || 3622      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3623    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3624 3625  bool Commuted = false; 3626  // FIXME: This should also accept a bitcast of a splat?  Be careful, not 3627  // 1,1,1,1 -> v8i16 though. 3628  V1IsSplat = isSplatVector(V1.Val); 3629  V2IsSplat = isSplatVector(V2.Val); 3630 3631  // Canonicalize the splat or undef, if present, to be on the RHS. 3632  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3633    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3634    std::swap(V1IsSplat, V2IsSplat); 3635    std::swap(V1IsUndef, V2IsUndef); 3636    Commuted = true; 3637  } 3638 3639  // FIXME: Figure out a cleaner way to do this. 3640  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3641    if (V2IsUndef) return V1; 3642    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3643    if (V2IsSplat) { 3644      // V2 is a splat, so the mask may be malformed. That is, it may point 3645      // to any V2 element. The instruction selector won't like this. Get 3646      // a corrected mask and commute to form a proper MOVS{S|D}. 3647      SDOperand NewMask = getMOVLMask(NumElems, DAG); 3648      if (NewMask.Val != PermMask.Val) 3649        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3650    } 3651    return Op; 3652  } 3653 3654  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3655      X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3656      X86::isUNPCKLMask(PermMask.Val) || 3657      X86::isUNPCKHMask(PermMask.Val)) 3658    return Op; 3659 3660  if (V2IsSplat) { 3661    // Normalize the mask so all entries that point to V2 point to its first 3662    // element, then try to match unpck{h|l} again. If they match, return a 3663    // new vector_shuffle with the corrected mask.
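    // For example, with V2 a splat the v4i32 mask <0, 5, 1, 7> is equivalent
    // to <0, 4, 1, 4>, since every element of V2 holds the same value; the
    // normalized form is what the unpckl/unpckh pattern checks below expect.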
3664    SDOperand NewMask = NormalizeMask(PermMask, DAG); 3665    if (NewMask.Val != PermMask.Val) { 3666      if (X86::isUNPCKLMask(PermMask.Val, true)) { 3667        SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3668        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3669      } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3670        SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3671        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3672      } 3673    } 3674  } 3675 3676  // Normalize the node to match x86 shuffle ops if needed. 3677  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3678    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3679 3680  if (Commuted) { 3681    // Commute it back and try unpck* again. 3682    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3683    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3684        X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3685        X86::isUNPCKLMask(PermMask.Val) || 3686        X86::isUNPCKHMask(PermMask.Val)) 3687      return Op; 3688  } 3689 3690  // If VT is integer, try PSHUF* first, then SHUFP*. 3691  if (MVT::isInteger(VT)) { 3692    // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3693    // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3694    if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3695         X86::isPSHUFDMask(PermMask.Val)) || 3696        X86::isPSHUFHWMask(PermMask.Val) || 3697        X86::isPSHUFLWMask(PermMask.Val)) { 3698      if (V2.getOpcode() != ISD::UNDEF) 3699        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3700                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask); 3701      return Op; 3702    } 3703 3704    if (X86::isSHUFPMask(PermMask.Val) && 3705        MVT::getSizeInBits(VT) != 64)    // Don't do this for MMX. 3706      return Op; 3707  } else { 3708    // Floating point cases in the other order. 3709    if (X86::isSHUFPMask(PermMask.Val)) 3710      return Op; 3711    if (X86::isPSHUFDMask(PermMask.Val) || 3712        X86::isPSHUFHWMask(PermMask.Val) || 3713        X86::isPSHUFLWMask(PermMask.Val)) { 3714      if (V2.getOpcode() != ISD::UNDEF) 3715        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3716                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask); 3717      return Op; 3718    } 3719  } 3720 3721  // Handle v8i16 specifically since SSE can do word extraction and insertion. 3722  if (VT == MVT::v8i16) { 3723    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3724    if (NewOp.Val) 3725      return NewOp; 3726  } 3727 3728  // Handle all 4 wide cases with a number of shuffles. 3729  if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3730    // Don't do this for MMX. 3731    MVT::ValueType MaskVT = PermMask.getValueType(); 3732    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3733    SmallVector<std::pair<int, int>, 8> Locs; 3734    Locs.resize(NumElems); // resize (not reserve): the loops below fill it via operator[]. 3735    SmallVector<SDOperand, 8> Mask1(NumElems, 3736                                    DAG.getNode(ISD::UNDEF, MaskEVT)); 3737    SmallVector<SDOperand, 8> Mask2(NumElems, 3738                                    DAG.getNode(ISD::UNDEF, MaskEVT)); 3739    unsigned NumHi = 0; 3740    unsigned NumLo = 0; 3741    // If no more than two elements come from either vector, this can be 3742    // implemented with two shuffles. The first shuffle gathers the elements; 3743    // the second shuffle, which takes the first shuffle as both of its 3744    // vector operands, puts the elements into the right order.
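    // A worked example for v4f32 with mask <0, 4, 1, 5>: the first shuffle
    // gathers <V1[0], V1[1], V2[0], V2[1]> (two elements from each source),
    // and the second shuffle of that result with itself reorders it into the
    // requested <V1[0], V2[0], V1[1], V2[1]>.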
3745    for (unsigned i = 0; i != NumElems; ++i) { 3746      SDOperand Elt = PermMask.getOperand(i); 3747      if (Elt.getOpcode() == ISD::UNDEF) { 3748        Locs[i] = std::make_pair(-1, -1); 3749      } else { 3750        unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3751        if (Val < NumElems) { 3752          Locs[i] = std::make_pair(0, NumLo); 3753          Mask1[NumLo] = Elt; 3754          NumLo++; 3755        } else { 3756          Locs[i] = std::make_pair(1, NumHi); 3757          if (2+NumHi < NumElems) 3758            Mask1[2+NumHi] = Elt; 3759          NumHi++; 3760        } 3761      } 3762    } 3763    if (NumLo <= 2 && NumHi <= 2) { 3764      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3765                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3766                                   &Mask1[0], Mask1.size())); 3767      for (unsigned i = 0; i != NumElems; ++i) { 3768        if (Locs[i].first == -1) 3769          continue; 3770        else { 3771          unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3772          Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3773          Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3774        } 3775      } 3776 3777      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3778                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3779                                     &Mask2[0], Mask2.size())); 3780    } 3781 3782    // Break it into (shuffle shuffle_hi, shuffle_lo). 3783    Locs.clear(); Locs.resize(NumElems); // re-populated below via operator[]. 3784    SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3785    SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3786    SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3787    unsigned MaskIdx = 0; 3788    unsigned LoIdx = 0; 3789    unsigned HiIdx = NumElems/2; 3790    for (unsigned i = 0; i != NumElems; ++i) { 3791      if (i == NumElems/2) { 3792        MaskPtr = &HiMask; 3793        MaskIdx = 1; 3794        LoIdx = 0; 3795        HiIdx = NumElems/2; 3796      } 3797      SDOperand Elt = PermMask.getOperand(i); 3798      if (Elt.getOpcode() == ISD::UNDEF) { 3799        Locs[i] = std::make_pair(-1, -1); 3800      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3801        Locs[i] = std::make_pair(MaskIdx, LoIdx); 3802        (*MaskPtr)[LoIdx] = Elt; 3803        LoIdx++; 3804      } else { 3805        Locs[i] = std::make_pair(MaskIdx, HiIdx); 3806        (*MaskPtr)[HiIdx] = Elt; 3807        HiIdx++; 3808      } 3809    } 3810 3811    SDOperand LoShuffle = 3812      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3813                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3814                              &LoMask[0], LoMask.size())); 3815    SDOperand HiShuffle = 3816      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3817                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3818                              &HiMask[0], HiMask.size())); 3819    SmallVector<SDOperand, 8> MaskOps; 3820    for (unsigned i = 0; i != NumElems; ++i) { 3821      if (Locs[i].first == -1) { 3822        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3823      } else { 3824        unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3825        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3826      } 3827    } 3828    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3829                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3830                                   &MaskOps[0], MaskOps.size())); 3831  } 3832 3833  return SDOperand(); 3834} 3835 3836SDOperand 3837X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3838                                                SelectionDAG &DAG) { 3839  MVT::ValueType VT = Op.getValueType(); 3840  if (MVT::getSizeInBits(VT) == 8) { 3841    SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3842                                    Op.getOperand(0), Op.getOperand(1)); 3843    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3844                                   DAG.getValueType(VT)); 3845    return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3846  } else if (MVT::getSizeInBits(VT) == 16) { 3847    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3848                                    Op.getOperand(0), Op.getOperand(1)); 3849    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
3850                                   DAG.getValueType(VT)); 3851    return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3852  } 3853  return SDOperand(); 3854} 3855 3856 3857SDOperand 3858X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3859  if (!isa<ConstantSDNode>(Op.getOperand(1))) 3860    return SDOperand(); 3861 3862  if (Subtarget->hasSSE41()) 3863    return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 3864 3865  MVT::ValueType VT = Op.getValueType(); 3866  // TODO: handle v16i8. 3867  if (MVT::getSizeInBits(VT) == 16) { 3868    SDOperand Vec = Op.getOperand(0); 3869    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3870    if (Idx == 0) 3871      return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3872                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3873                                     DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3874                                     Op.getOperand(1))); 3875    // Transform it so it matches pextrw, which produces a 32-bit result. 3876    MVT::ValueType EVT = (MVT::ValueType)(VT+1); // Relies on the MVT numbering: i16 + 1 == i32. 3877    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3878                                    Op.getOperand(0), Op.getOperand(1)); 3879    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3880                                   DAG.getValueType(VT)); 3881    return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3882  } else if (MVT::getSizeInBits(VT) == 32) { 3883    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3884    if (Idx == 0) 3885      return Op; 3886    // SHUFPS the element to the lowest double word, then movss. 3887    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3888    SmallVector<SDOperand, 8> IdxVec; 3889    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3891    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3893    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3895    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3897    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3898                                 &IdxVec[0], IdxVec.size()); 3899    SDOperand Vec = Op.getOperand(0); 3900    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3901                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3902    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3903                       DAG.getIntPtrConstant(0)); 3904  } else if (MVT::getSizeInBits(VT) == 64) { 3905    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 3906    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 3907    // to match extract_elt for f64. 3908    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3909    if (Idx == 0) 3910      return Op; 3911 3912    // UNPCKHPD the element to the lowest double word, then movsd. 3913    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored 3914    // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3915    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); // two mask elements for the two-element vector. 3916    SmallVector<SDOperand, 8> IdxVec; 3917    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3918    IdxVec.
push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3920    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3921                                 &IdxVec[0], IdxVec.size()); 3922    SDOperand Vec = Op.getOperand(0); 3923    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3924                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3925    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3926                       DAG.getIntPtrConstant(0)); 3927  } 3928 3929  return SDOperand(); 3930} 3931 3932SDOperand 3933X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG) { 3934  MVT::ValueType VT = Op.getValueType(); 3935  MVT::ValueType EVT = MVT::getVectorElementType(VT); 3936 3937  SDOperand N0 = Op.getOperand(0); 3938  SDOperand N1 = Op.getOperand(1); 3939  SDOperand N2 = Op.getOperand(2); 3940 3941  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) { 3942    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB 3943                                                  : X86ISD::PINSRW; 3944    // Transform it so it matches pinsr{b,w}, which expects a GR32 as its 3945    // second argument. 3946    if (N1.getValueType() != MVT::i32) 3947      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3948    if (N2.getValueType() != MVT::i32) 3949      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3950    return DAG.getNode(Opc, VT, N0, N1, N2); 3951  } else if (EVT == MVT::f32) { 3952    // Bits [7:6] of the constant are the source select.  This will always be 3953    // zero here.  The DAG Combiner may combine an extract_elt index into these 3954    // bits.  For example (insert (extract, 3), 2) could be matched by putting 3955    // the '3' into bits [7:6] of X86ISD::INSERTPS. 3956    // Bits [5:4] of the constant are the destination select.  This is the 3957    // value of the incoming immediate. 3958    // Bits [3:0] of the constant are the zero mask.  The DAG Combiner may 3959    // combine either a bitwise AND or an insert of float 0.0 to set these bits. 3960    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4); 3961    return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2); 3962  } 3963  return SDOperand(); 3964} 3965 3966SDOperand 3967X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3968  MVT::ValueType VT = Op.getValueType(); 3969  MVT::ValueType EVT = MVT::getVectorElementType(VT); 3970 3971  if (Subtarget->hasSSE41()) 3972    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 3973 3974  if (EVT == MVT::i8) 3975    return SDOperand(); 3976 3977  SDOperand N0 = Op.getOperand(0); 3978  SDOperand N1 = Op.getOperand(1); 3979  SDOperand N2 = Op.getOperand(2); 3980 3981  if (MVT::getSizeInBits(EVT) == 16) { 3982    // Transform it so it matches pinsrw, which expects a 16-bit value in a 3983    // GR32 as its second argument.
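    // Illustrative lowering: (insert_vector_elt (v8i16 X), (i16 s), 3)
    // becomes
    //   (X86ISD::PINSRW X, (any_extend s), 3)
    // which instruction selection turns into pinsrw $3, r32, xmm.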
3984    if (N1.getValueType() != MVT::i32) 3985      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3986    if (N2.getValueType() != MVT::i32) 3987      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3988    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3989  } 3990  return SDOperand(); 3991} 3992 3993SDOperand 3994X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3995  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3996  MVT::ValueType VT = MVT::v2i32; 3997  switch (Op.getValueType()) { 3998  default: break; 3999  case MVT::v16i8: 4000  case MVT::v8i16: 4001    VT = MVT::v4i32; 4002    break; 4003  } 4004  return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), 4005                     DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt)); 4006} 4007 4008// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 4009// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is 4010// one of the above-mentioned nodes. It has to be wrapped because otherwise 4011// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 4012// be used to form addressing modes. These wrapped nodes will be selected 4013// into MOV32ri. 4014SDOperand 4015X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 4016  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 4017  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 4018                                               getPointerTy(), 4019                                               CP->getAlignment()); 4020  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4021  // With PIC, the address is actually $g + Offset. 4022  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4023      !Subtarget->isPICStyleRIPRel()) { 4024    Result = DAG.getNode(ISD::ADD, getPointerTy(), 4025                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4026                         Result); 4027  } 4028 4029  return Result; 4030} 4031 4032SDOperand 4033X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 4034  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 4035  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 4036  // If it's a debug information descriptor, don't mess with it. 4037  if (DAG.isVerifiedDebugInfoDesc(Op)) 4038    return Result; 4039  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4040  // With PIC, the address is actually $g + Offset. 4041  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4042      !Subtarget->isPICStyleRIPRel()) { 4043    Result = DAG.getNode(ISD::ADD, getPointerTy(), 4044                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4045                         Result); 4046  } 4047 4048  // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 4049  // load the value at address GV, not the value of GV itself. This means that 4050  // the GlobalAddress must be in the base or index register of the address, not 4051  // the GV offset field.
The platform check is inside the GVRequiresExtraLoad() call. 4052  // The same applies to external symbols during PIC codegen. 4053  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 4054    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 4055                         PseudoSourceValue::getGOT(), 0); 4056 4057  return Result; 4058} 4059 4060// Lower ISD::GlobalTLSAddress using the "general dynamic" model. 4061static SDOperand 4062LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4063                              const MVT::ValueType PtrVT) { 4064  SDOperand InFlag; 4065  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 4066                                     DAG.getNode(X86ISD::GlobalBaseReg, 4067                                                 PtrVT), InFlag); 4068  InFlag = Chain.getValue(1); 4069 4070  // emit leal symbol@TLSGD(,%ebx,1), %eax 4071  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4072  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4073                                             GA->getValueType(0), 4074                                             GA->getOffset()); 4075  SDOperand Ops[] = { Chain, TGA, InFlag }; 4076  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 4077  InFlag = Result.getValue(2); 4078  Chain = Result.getValue(1); 4079 4080  // call ___tls_get_addr. This function receives its argument in 4081  // the register EAX. 4082  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 4083  InFlag = Chain.getValue(1); 4084 4085  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4086  SDOperand Ops1[] = { Chain, 4087                       DAG.getTargetExternalSymbol("___tls_get_addr", 4088                                                   PtrVT), 4089                       DAG.getRegister(X86::EAX, PtrVT), 4090                       DAG.getRegister(X86::EBX, PtrVT), 4091                       InFlag }; 4092  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 4093  InFlag = Chain.getValue(1); 4094 4095  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 4096} 4097 4098// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 4099// "local exec" model. 4100static SDOperand 4101LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4102                    const MVT::ValueType PtrVT) { 4103  // Get the thread pointer. 4104  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4105  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4106  // exec). 4107  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4108                                             GA->getValueType(0), 4109                                             GA->getOffset()); 4110  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4111 4112  if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4113    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 4114                         PseudoSourceValue::getGOT(), 0); 4115 4116  // The address of the thread-local variable is the sum of the thread 4117  // pointer and the variable's offset.
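  // Sketch of the code this yields (mirroring the comments above), for the
  // local exec case where the offset is a link-time constant:
  //   movl %gs:0, %eax        ; thread pointer
  //   addl x@ntpoff, %eax     ; &x = thread pointer + constant offset
  // For initial exec, x@indntpoff names a GOT slot holding the offset, which
  // the load built above fetches at run time before the same add.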
4118  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4119} 4120 4121SDOperand 4122X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4123  // TODO: implement the "local dynamic" model 4124  // TODO: implement the "initial exec" model for PIC executables 4125  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 4126         "TLS not implemented for non-ELF and 64-bit targets"); 4127  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4128  // If the relocation model is PIC, use the "General Dynamic" TLS model; 4129  // otherwise, use the "Local Exec" TLS model. 4130  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4131    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 4132  else 4133    return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4134} 4135 4136SDOperand 4137X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4138  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4139  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4140  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4141  // With PIC, the address is actually $g + Offset. 4142  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4143      !Subtarget->isPICStyleRIPRel()) { 4144    Result = DAG.getNode(ISD::ADD, getPointerTy(), 4145                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4146                         Result); 4147  } 4148 4149  return Result; 4150} 4151 4152SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4153  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4154  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4155  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4156  // With PIC, the address is actually $g + Offset. 4157  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4158      !Subtarget->isPICStyleRIPRel()) { 4159    Result = DAG.getNode(ISD::ADD, getPointerTy(), 4160                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4161                         Result); 4162  } 4163 4164  return Result; 4165} 4166 4167/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4168/// take a 2 x i32 value to shift plus a shift amount. 4169SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4170  assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4171  MVT::ValueType VT = Op.getValueType(); 4172  unsigned VTBits = MVT::getSizeInBits(VT); 4173  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4174  SDOperand ShOpLo = Op.getOperand(0); 4175  SDOperand ShOpHi = Op.getOperand(1); 4176  SDOperand ShAmt  = Op.getOperand(2); 4177  SDOperand Tmp1 = isSRA ? 4178    DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : 4179    DAG.getConstant(0, VT); 4180 4181  SDOperand Tmp2, Tmp3; 4182  if (Op.getOpcode() == ISD::SHL_PARTS) { 4183    Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); 4184    Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); 4185  } else { 4186    Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt); 4187    Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4188 } 4189 4190 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4191 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4192 DAG.getConstant(VTBits, MVT::i8)); 4193 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4194 AndNode, DAG.getConstant(0, MVT::i8)); 4195 4196 SDOperand Hi, Lo; 4197 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4198 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4199 SmallVector<SDOperand, 4> Ops; 4200 if (Op.getOpcode() == ISD::SHL_PARTS) { 4201 Ops.push_back(Tmp2); 4202 Ops.push_back(Tmp3); 4203 Ops.push_back(CC); 4204 Ops.push_back(Cond); 4205 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4206 4207 Ops.clear(); 4208 Ops.push_back(Tmp3); 4209 Ops.push_back(Tmp1); 4210 Ops.push_back(CC); 4211 Ops.push_back(Cond); 4212 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4213 } else { 4214 Ops.push_back(Tmp2); 4215 Ops.push_back(Tmp3); 4216 Ops.push_back(CC); 4217 Ops.push_back(Cond); 4218 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4219 4220 Ops.clear(); 4221 Ops.push_back(Tmp3); 4222 Ops.push_back(Tmp1); 4223 Ops.push_back(CC); 4224 Ops.push_back(Cond); 4225 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4226 } 4227 4228 VTs = DAG.getNodeValueTypes(VT, VT); 4229 Ops.clear(); 4230 Ops.push_back(Lo); 4231 Ops.push_back(Hi); 4232 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4233} 4234 4235SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4236 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4237 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4238 "Unknown SINT_TO_FP to lower!"); 4239 4240 // These are really Legal; caller falls through into that case. 4241 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4242 return SDOperand(); 4243 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4244 Subtarget->is64Bit()) 4245 return SDOperand(); 4246 4247 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4248 MachineFunction &MF = DAG.getMachineFunction(); 4249 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4250 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4251 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4252 StackSlot, 4253 PseudoSourceValue::getFixedStack(), 4254 SSFI); 4255 4256 // Build the FILD 4257 SDVTList Tys; 4258 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4259 if (useSSE) 4260 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4261 else 4262 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4263 SmallVector<SDOperand, 8> Ops; 4264 Ops.push_back(Chain); 4265 Ops.push_back(StackSlot); 4266 Ops.push_back(DAG.getValueType(SrcVT)); 4267 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4268 Tys, &Ops[0], Ops.size()); 4269 4270 if (useSSE) { 4271 Chain = Result.getValue(1); 4272 SDOperand InFlag = Result.getValue(2); 4273 4274 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4275 // shouldn't be necessary except that RFP cannot be live across 4276 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
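    // Sketch of the SSE round trip built below: FILD_FLAG loads the integer
    // from its stack slot into an x87 register; FST then spills it, in the
    // result's precision, to a second stack slot; the final load brings the
    // value into an XMM register.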
4277 MachineFunction &MF = DAG.getMachineFunction(); 4278 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4279 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4280 Tys = DAG.getVTList(MVT::Other); 4281 SmallVector<SDOperand, 8> Ops; 4282 Ops.push_back(Chain); 4283 Ops.push_back(Result); 4284 Ops.push_back(StackSlot); 4285 Ops.push_back(DAG.getValueType(Op.getValueType())); 4286 Ops.push_back(InFlag); 4287 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4288 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4289 PseudoSourceValue::getFixedStack(), SSFI); 4290 } 4291 4292 return Result; 4293} 4294 4295std::pair<SDOperand,SDOperand> X86TargetLowering:: 4296FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4297 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4298 "Unknown FP_TO_SINT to lower!"); 4299 4300 // These are really Legal. 4301 if (Op.getValueType() == MVT::i32 && 4302 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4303 return std::make_pair(SDOperand(), SDOperand()); 4304 if (Subtarget->is64Bit() && 4305 Op.getValueType() == MVT::i64 && 4306 Op.getOperand(0).getValueType() != MVT::f80) 4307 return std::make_pair(SDOperand(), SDOperand()); 4308 4309 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4310 // stack slot. 4311 MachineFunction &MF = DAG.getMachineFunction(); 4312 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4313 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4314 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4315 unsigned Opc; 4316 switch (Op.getValueType()) { 4317 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4318 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4319 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4320 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4321 } 4322 4323 SDOperand Chain = DAG.getEntryNode(); 4324 SDOperand Value = Op.getOperand(0); 4325 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4326 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4327 Chain = DAG.getStore(Chain, Value, StackSlot, 4328 PseudoSourceValue::getFixedStack(), SSFI); 4329 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4330 SDOperand Ops[] = { 4331 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4332 }; 4333 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4334 Chain = Value.getValue(1); 4335 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4336 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4337 } 4338 4339 // Build the FP_TO_INT*_IN_MEM 4340 SDOperand Ops[] = { Chain, Value, StackSlot }; 4341 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4342 4343 return std::make_pair(FIST, StackSlot); 4344} 4345 4346SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4347 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4348 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4349 if (FIST.Val == 0) return SDOperand(); 4350 4351 // Load the result. 4352 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4353} 4354 4355SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4356 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4357 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4358 if (FIST.Val == 0) return 0; 4359 4360 // Return an i64 load from the stack slot. 
4361 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4362 4363 // Use a MERGE_VALUES node to drop the chain result value. 4364 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4365} 4366 4367SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4368 MVT::ValueType VT = Op.getValueType(); 4369 MVT::ValueType EltVT = VT; 4370 if (MVT::isVector(VT)) 4371 EltVT = MVT::getVectorElementType(VT); 4372 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4373 std::vector<Constant*> CV; 4374 if (EltVT == MVT::f64) { 4375 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4376 CV.push_back(C); 4377 CV.push_back(C); 4378 } else { 4379 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4380 CV.push_back(C); 4381 CV.push_back(C); 4382 CV.push_back(C); 4383 CV.push_back(C); 4384 } 4385 Constant *C = ConstantVector::get(CV); 4386 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4387 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4388 PseudoSourceValue::getConstantPool(), 0, 4389 false, 16); 4390 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4391} 4392 4393SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4394 MVT::ValueType VT = Op.getValueType(); 4395 MVT::ValueType EltVT = VT; 4396 unsigned EltNum = 1; 4397 if (MVT::isVector(VT)) { 4398 EltVT = MVT::getVectorElementType(VT); 4399 EltNum = MVT::getVectorNumElements(VT); 4400 } 4401 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4402 std::vector<Constant*> CV; 4403 if (EltVT == MVT::f64) { 4404 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4405 CV.push_back(C); 4406 CV.push_back(C); 4407 } else { 4408 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4409 CV.push_back(C); 4410 CV.push_back(C); 4411 CV.push_back(C); 4412 CV.push_back(C); 4413 } 4414 Constant *C = ConstantVector::get(CV); 4415 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4416 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4417 PseudoSourceValue::getConstantPool(), 0, 4418 false, 16); 4419 if (MVT::isVector(VT)) { 4420 return DAG.getNode(ISD::BIT_CONVERT, VT, 4421 DAG.getNode(ISD::XOR, MVT::v2i64, 4422 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4423 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4424 } else { 4425 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4426 } 4427} 4428 4429SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4430 SDOperand Op0 = Op.getOperand(0); 4431 SDOperand Op1 = Op.getOperand(1); 4432 MVT::ValueType VT = Op.getValueType(); 4433 MVT::ValueType SrcVT = Op1.getValueType(); 4434 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4435 4436 // If second operand is smaller, extend it first. 4437 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4438 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4439 SrcVT = VT; 4440 SrcTy = MVT::getTypeForValueType(SrcVT); 4441 } 4442 // And if it is bigger, shrink it first. 4443 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4444 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4445 SrcVT = VT; 4446 SrcTy = MVT::getTypeForValueType(SrcVT); 4447 } 4448 4449 // At this point the operands and the result should have the same 4450 // type, and that won't be f80 since that is not custom lowered. 4451 4452 // First get the sign bit of second operand. 
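  // Illustrative constants for the f64 case: Mask1 below is
  // <0x8000000000000000, 0> (just the sign bit of the low element) and Mask2
  // further down is its complement <0x7FFFFFFFFFFFFFFF, 0>, so the result is
  //   (Op0 & ~signbit) | (Op1 & signbit)
  // computed with FAND/FOR.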
4453 std::vector<Constant*> CV; 4454 if (SrcVT == MVT::f64) { 4455 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4456 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4457 } else { 4458 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4459 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4460 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4461 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4462 } 4463 Constant *C = ConstantVector::get(CV); 4464 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4465 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4466 PseudoSourceValue::getConstantPool(), 0, 4467 false, 16); 4468 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4469 4470 // Shift sign bit right or left if the two operands have different types. 4471 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4472 // Op0 is MVT::f32, Op1 is MVT::f64. 4473 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4474 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4475 DAG.getConstant(32, MVT::i32)); 4476 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4477 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4478 DAG.getIntPtrConstant(0)); 4479 } 4480 4481 // Clear first operand sign bit. 4482 CV.clear(); 4483 if (VT == MVT::f64) { 4484 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4485 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4486 } else { 4487 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4488 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4489 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4490 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4491 } 4492 C = ConstantVector::get(CV); 4493 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4494 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4495 PseudoSourceValue::getConstantPool(), 0, 4496 false, 16); 4497 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4498 4499 // Or the value with the sign bit. 
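// Net effect: copysign(x, y) = (x & ~signmask) | (y & signmask).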
4500 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4501} 4502 4503SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4504 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4505 SDOperand Cond; 4506 SDOperand Op0 = Op.getOperand(0); 4507 SDOperand Op1 = Op.getOperand(1); 4508 SDOperand CC = Op.getOperand(2); 4509 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4510 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4511 unsigned X86CC; 4512 4513 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4514 Op0, Op1, DAG)) { 4515 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4516 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4517 DAG.getConstant(X86CC, MVT::i8), Cond); 4518 } 4519 4520 assert(isFP && "Illegal integer SetCC!"); 4521 4522 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4523 switch (SetCCOpcode) { 4524 default: assert(false && "Illegal floating point SetCC!"); 4525 case ISD::SETOEQ: { // !PF & ZF 4526 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4527 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4528 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4529 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4530 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4531 } 4532 case ISD::SETUNE: { // PF | !ZF 4533 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4534 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4535 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4536 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4537 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4538 } 4539 } 4540} 4541 4542 4543SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4544 bool addTest = true; 4545 SDOperand Cond = Op.getOperand(0); 4546 SDOperand CC; 4547 4548 if (Cond.getOpcode() == ISD::SETCC) 4549 Cond = LowerSETCC(Cond, DAG); 4550 4551 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4552 // setting operand in place of the X86ISD::SETCC. 4553 if (Cond.getOpcode() == X86ISD::SETCC) { 4554 CC = Cond.getOperand(0); 4555 4556 SDOperand Cmp = Cond.getOperand(1); 4557 unsigned Opc = Cmp.getOpcode(); 4558 MVT::ValueType VT = Op.getValueType(); 4559 4560 bool IllegalFPCMov = false; 4561 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4562 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4563 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4564 4565 if ((Opc == X86ISD::CMP || 4566 Opc == X86ISD::COMI || 4567 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4568 Cond = Cmp; 4569 addTest = false; 4570 } 4571 } 4572 4573 if (addTest) { 4574 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4575 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4576 } 4577 4578 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4579 MVT::Flag); 4580 SmallVector<SDOperand, 4> Ops; 4581 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4582 // condition is true. 
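// Hence the SELECT's false value (operand 2) is pushed first, so that it
// becomes operand 0 of the CMOV node.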
4583 Ops.push_back(Op.getOperand(2));
4584 Ops.push_back(Op.getOperand(1));
4585 Ops.push_back(CC);
4586 Ops.push_back(Cond);
4587 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
4588 }
4589 
4590 SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
4591 bool addTest = true;
4592 SDOperand Chain = Op.getOperand(0);
4593 SDOperand Cond = Op.getOperand(1);
4594 SDOperand Dest = Op.getOperand(2);
4595 SDOperand CC;
4596 
4597 if (Cond.getOpcode() == ISD::SETCC)
4598 Cond = LowerSETCC(Cond, DAG);
4599 
4600 // If condition flag is set by a X86ISD::CMP, then use it as the condition
4601 // setting operand in place of the X86ISD::SETCC.
4602 if (Cond.getOpcode() == X86ISD::SETCC) {
4603 CC = Cond.getOperand(0);
4604 
4605 SDOperand Cmp = Cond.getOperand(1);
4606 unsigned Opc = Cmp.getOpcode();
4607 if (Opc == X86ISD::CMP ||
4608 Opc == X86ISD::COMI ||
4609 Opc == X86ISD::UCOMI) {
4610 Cond = Cmp;
4611 addTest = false;
4612 }
4613 }
4614 
4615 if (addTest) {
4616 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4617 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
4618 }
4619 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
4620 Chain, Dest, CC, Cond);
4621 }
4622 
4623 
4624 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
4625 // Calls to _alloca are needed to probe the stack when allocating more than 4K
4626 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
4627 // that the guard pages used by the OS virtual memory manager are allocated in
4628 // the correct sequence.
4629 SDOperand
4630 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
4631 SelectionDAG &DAG) {
4632 assert(Subtarget->isTargetCygMing() &&
4633 "This should be used only on Cygwin/Mingw targets");
4634 
4635 // Get the inputs.
4636 SDOperand Chain = Op.getOperand(0);
4637 SDOperand Size = Op.getOperand(1);
4638 // FIXME: Ensure alignment here
4639 
4640 SDOperand Flag;
4641 
4642 MVT::ValueType IntPtr = getPointerTy();
4643 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
4644 
4645 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
4646 Flag = Chain.getValue(1);
4647 
4648 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
4649 SDOperand Ops[] = { Chain,
4650 DAG.getTargetExternalSymbol("_alloca", IntPtr),
4651 DAG.getRegister(X86::EAX, IntPtr),
4652 Flag };
4653 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
4654 Flag = Chain.getValue(1);
4655 
4656 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
4657 
4658 std::vector<MVT::ValueType> Tys;
4659 Tys.push_back(SPTy);
4660 Tys.push_back(MVT::Other);
4661 SDOperand Ops1[2] = { Chain.getValue(0), Chain };
4662 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
4663 }
4664 
4665 SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
4666 SDOperand InFlag(0, 0);
4667 SDOperand Chain = Op.getOperand(0);
4668 unsigned Align =
4669 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4670 if (Align == 0) Align = 1;
4671 
4672 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4673 // If not DWORD aligned or size is more than the threshold, call memset.
4674 // The libc version is likely to be faster for these cases. It can use the
4675 // address value and run-time information about the CPU.
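// Otherwise the memset is expanded inline with rep;stos, using the widest
// store unit the known alignment permits (see below).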
4676 if ((Align & 3) != 0 || 4677 (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) { 4678 MVT::ValueType IntPtr = getPointerTy(); 4679 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4680 TargetLowering::ArgListTy Args; 4681 TargetLowering::ArgListEntry Entry; 4682 Entry.Node = Op.getOperand(1); 4683 Entry.Ty = IntPtrTy; 4684 Args.push_back(Entry); 4685 // Extend the unsigned i8 argument to be an int value for the call. 4686 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4687 Entry.Ty = IntPtrTy; 4688 Args.push_back(Entry); 4689 Entry.Node = Op.getOperand(3); 4690 Args.push_back(Entry); 4691 std::pair<SDOperand,SDOperand> CallResult = 4692 LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, 4693 false, DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4694 return CallResult.second; 4695 } 4696 4697 MVT::ValueType AVT; 4698 SDOperand Count; 4699 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4700 unsigned BytesLeft = 0; 4701 bool TwoRepStos = false; 4702 if (ValC) { 4703 unsigned ValReg; 4704 uint64_t Val = ValC->getValue() & 255; 4705 4706 // If the value is a constant, then we can potentially use larger sets. 4707 switch (Align & 3) { 4708 case 2: // WORD aligned 4709 AVT = MVT::i16; 4710 ValReg = X86::AX; 4711 Val = (Val << 8) | Val; 4712 break; 4713 case 0: // DWORD aligned 4714 AVT = MVT::i32; 4715 ValReg = X86::EAX; 4716 Val = (Val << 8) | Val; 4717 Val = (Val << 16) | Val; 4718 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 4719 AVT = MVT::i64; 4720 ValReg = X86::RAX; 4721 Val = (Val << 32) | Val; 4722 } 4723 break; 4724 default: // Byte aligned 4725 AVT = MVT::i8; 4726 ValReg = X86::AL; 4727 Count = Op.getOperand(3); 4728 break; 4729 } 4730 4731 if (AVT > MVT::i8) { 4732 if (I) { 4733 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4734 Count = DAG.getIntPtrConstant(I->getValue() / UBytes); 4735 BytesLeft = I->getValue() % UBytes; 4736 } else { 4737 assert(AVT >= MVT::i32 && 4738 "Do not use rep;stos if not at least DWORD aligned"); 4739 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4740 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4741 TwoRepStos = true; 4742 } 4743 } 4744 4745 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4746 InFlag); 4747 InFlag = Chain.getValue(1); 4748 } else { 4749 AVT = MVT::i8; 4750 Count = Op.getOperand(3); 4751 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4752 InFlag = Chain.getValue(1); 4753 } 4754 4755 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4756 Count, InFlag); 4757 InFlag = Chain.getValue(1); 4758 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4759 Op.getOperand(1), InFlag); 4760 InFlag = Chain.getValue(1); 4761 4762 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4763 SmallVector<SDOperand, 8> Ops; 4764 Ops.push_back(Chain); 4765 Ops.push_back(DAG.getValueType(AVT)); 4766 Ops.push_back(InFlag); 4767 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4768 4769 if (TwoRepStos) { 4770 InFlag = Chain.getValue(1); 4771 Count = Op.getOperand(3); 4772 MVT::ValueType CVT = Count.getValueType(); 4773 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4774 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4775 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, 4776 Left, InFlag); 4777 InFlag = Chain.getValue(1); 4778 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4779 Ops.clear(); 4780 Ops.push_back(Chain); 4781 Ops.push_back(DAG.getValueType(MVT::i8)); 4782 Ops.push_back(InFlag); 4783 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4784 } else if (BytesLeft) { 4785 // Issue stores for the last 1 - 7 bytes. 4786 SDOperand Value; 4787 unsigned Val = ValC->getValue() & 255; 4788 unsigned Offset = I->getValue() - BytesLeft; 4789 SDOperand DstAddr = Op.getOperand(1); 4790 MVT::ValueType AddrVT = DstAddr.getValueType(); 4791 if (BytesLeft >= 4) { 4792 Val = (Val << 8) | Val; 4793 Val = (Val << 16) | Val; 4794 Value = DAG.getConstant(Val, MVT::i32); 4795 Chain = DAG.getStore(Chain, Value, 4796 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4797 DAG.getConstant(Offset, AddrVT)), 4798 NULL, 0); 4799 BytesLeft -= 4; 4800 Offset += 4; 4801 } 4802 if (BytesLeft >= 2) { 4803 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4804 Chain = DAG.getStore(Chain, Value, 4805 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4806 DAG.getConstant(Offset, AddrVT)), 4807 NULL, 0); 4808 BytesLeft -= 2; 4809 Offset += 2; 4810 } 4811 if (BytesLeft == 1) { 4812 Value = DAG.getConstant(Val, MVT::i8); 4813 Chain = DAG.getStore(Chain, Value, 4814 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4815 DAG.getConstant(Offset, AddrVT)), 4816 NULL, 0); 4817 } 4818 } 4819 4820 return Chain; 4821} 4822 4823SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4824 SDOperand Dest, 4825 SDOperand Source, 4826 unsigned Size, 4827 unsigned Align, 4828 SelectionDAG &DAG) { 4829 MVT::ValueType AVT; 4830 unsigned BytesLeft = 0; 4831 switch (Align & 3) { 4832 case 2: // WORD aligned 4833 AVT = MVT::i16; 4834 break; 4835 case 0: // DWORD aligned 4836 AVT = MVT::i32; 4837 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4838 AVT = MVT::i64; 4839 break; 4840 default: // Byte aligned 4841 AVT = MVT::i8; 4842 break; 4843 } 4844 4845 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4846 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4847 BytesLeft = Size % UBytes; 4848 4849 SDOperand InFlag(0, 0); 4850 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4851 Count, InFlag); 4852 InFlag = Chain.getValue(1); 4853 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4854 Dest, InFlag); 4855 InFlag = Chain.getValue(1); 4856 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4857 Source, InFlag); 4858 InFlag = Chain.getValue(1); 4859 4860 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4861 SmallVector<SDOperand, 8> Ops; 4862 Ops.push_back(Chain); 4863 Ops.push_back(DAG.getValueType(AVT)); 4864 Ops.push_back(InFlag); 4865 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4866 4867 if (BytesLeft) { 4868 // Issue loads and stores for the last 1 - 7 bytes. 
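// The tail is copied with at most one i32, one i16, and one i8 load/store
// pair, mirroring the store-only tail handling in LowerMEMSET above.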
4869 unsigned Offset = Size - BytesLeft; 4870 SDOperand DstAddr = Dest; 4871 MVT::ValueType DstVT = DstAddr.getValueType(); 4872 SDOperand SrcAddr = Source; 4873 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4874 SDOperand Value; 4875 if (BytesLeft >= 4) { 4876 Value = DAG.getLoad(MVT::i32, Chain, 4877 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4878 DAG.getConstant(Offset, SrcVT)), 4879 NULL, 0); 4880 Chain = Value.getValue(1); 4881 Chain = DAG.getStore(Chain, Value, 4882 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4883 DAG.getConstant(Offset, DstVT)), 4884 NULL, 0); 4885 BytesLeft -= 4; 4886 Offset += 4; 4887 } 4888 if (BytesLeft >= 2) { 4889 Value = DAG.getLoad(MVT::i16, Chain, 4890 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4891 DAG.getConstant(Offset, SrcVT)), 4892 NULL, 0); 4893 Chain = Value.getValue(1); 4894 Chain = DAG.getStore(Chain, Value, 4895 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4896 DAG.getConstant(Offset, DstVT)), 4897 NULL, 0); 4898 BytesLeft -= 2; 4899 Offset += 2; 4900 } 4901 4902 if (BytesLeft == 1) { 4903 Value = DAG.getLoad(MVT::i8, Chain, 4904 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4905 DAG.getConstant(Offset, SrcVT)), 4906 NULL, 0); 4907 Chain = Value.getValue(1); 4908 Chain = DAG.getStore(Chain, Value, 4909 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4910 DAG.getConstant(Offset, DstVT)), 4911 NULL, 0); 4912 } 4913 } 4914 4915 return Chain; 4916} 4917 4918/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4919SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4920 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4921 SDOperand TheChain = N->getOperand(0); 4922 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4923 if (Subtarget->is64Bit()) { 4924 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4925 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4926 MVT::i64, rax.getValue(2)); 4927 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4928 DAG.getConstant(32, MVT::i8)); 4929 SDOperand Ops[] = { 4930 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4931 }; 4932 4933 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4934 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4935 } 4936 4937 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4938 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4939 MVT::i32, eax.getValue(2)); 4940 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4941 SDOperand Ops[] = { eax, edx }; 4942 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4943 4944 // Use a MERGE_VALUES to return the value and chain. 4945 Ops[1] = edx.getValue(1); 4946 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4947 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4948} 4949 4950SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4951 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4952 4953 if (!Subtarget->is64Bit()) { 4954 // vastart just stores the address of the VarArgsFrameIndex slot into the 4955 // memory location argument. 4956 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4957 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4958 } 4959 4960 // __va_list_tag: 4961 // gp_offset (0 - 6 * 8) 4962 // fp_offset (48 - 48 + 8 * 16) 4963 // overflow_arg_area (point to parameters coming in memory). 
4964 // reg_save_area 4965 SmallVector<SDOperand, 8> MemOps; 4966 SDOperand FIN = Op.getOperand(1); 4967 // Store gp_offset 4968 SDOperand Store = DAG.getStore(Op.getOperand(0), 4969 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4970 FIN, SV, 0); 4971 MemOps.push_back(Store); 4972 4973 // Store fp_offset 4974 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4975 Store = DAG.getStore(Op.getOperand(0), 4976 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4977 FIN, SV, 0); 4978 MemOps.push_back(Store); 4979 4980 // Store ptr to overflow_arg_area 4981 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4982 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4983 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4984 MemOps.push_back(Store); 4985 4986 // Store ptr to reg_save_area. 4987 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4988 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4989 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4990 MemOps.push_back(Store); 4991 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4992} 4993 4994SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4995 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4996 SDOperand Chain = Op.getOperand(0); 4997 SDOperand DstPtr = Op.getOperand(1); 4998 SDOperand SrcPtr = Op.getOperand(2); 4999 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 5000 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5001 5002 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 5003 Chain = SrcPtr.getValue(1); 5004 for (unsigned i = 0; i < 3; ++i) { 5005 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 5006 Chain = Val.getValue(1); 5007 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 5008 if (i == 2) 5009 break; 5010 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 5011 DAG.getIntPtrConstant(8)); 5012 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 5013 DAG.getIntPtrConstant(8)); 5014 } 5015 return Chain; 5016} 5017 5018SDOperand 5019X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 5020 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 5021 switch (IntNo) { 5022 default: return SDOperand(); // Don't custom lower most intrinsics. 5023 // Comparison intrinsics. 
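// Each of these maps to a COMI/UCOMI node that sets EFLAGS, followed by an
// X86ISD::SETCC that materializes the appropriate condition code as an i8,
// which is then extended to i32.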
5024 case Intrinsic::x86_sse_comieq_ss: 5025 case Intrinsic::x86_sse_comilt_ss: 5026 case Intrinsic::x86_sse_comile_ss: 5027 case Intrinsic::x86_sse_comigt_ss: 5028 case Intrinsic::x86_sse_comige_ss: 5029 case Intrinsic::x86_sse_comineq_ss: 5030 case Intrinsic::x86_sse_ucomieq_ss: 5031 case Intrinsic::x86_sse_ucomilt_ss: 5032 case Intrinsic::x86_sse_ucomile_ss: 5033 case Intrinsic::x86_sse_ucomigt_ss: 5034 case Intrinsic::x86_sse_ucomige_ss: 5035 case Intrinsic::x86_sse_ucomineq_ss: 5036 case Intrinsic::x86_sse2_comieq_sd: 5037 case Intrinsic::x86_sse2_comilt_sd: 5038 case Intrinsic::x86_sse2_comile_sd: 5039 case Intrinsic::x86_sse2_comigt_sd: 5040 case Intrinsic::x86_sse2_comige_sd: 5041 case Intrinsic::x86_sse2_comineq_sd: 5042 case Intrinsic::x86_sse2_ucomieq_sd: 5043 case Intrinsic::x86_sse2_ucomilt_sd: 5044 case Intrinsic::x86_sse2_ucomile_sd: 5045 case Intrinsic::x86_sse2_ucomigt_sd: 5046 case Intrinsic::x86_sse2_ucomige_sd: 5047 case Intrinsic::x86_sse2_ucomineq_sd: { 5048 unsigned Opc = 0; 5049 ISD::CondCode CC = ISD::SETCC_INVALID; 5050 switch (IntNo) { 5051 default: break; 5052 case Intrinsic::x86_sse_comieq_ss: 5053 case Intrinsic::x86_sse2_comieq_sd: 5054 Opc = X86ISD::COMI; 5055 CC = ISD::SETEQ; 5056 break; 5057 case Intrinsic::x86_sse_comilt_ss: 5058 case Intrinsic::x86_sse2_comilt_sd: 5059 Opc = X86ISD::COMI; 5060 CC = ISD::SETLT; 5061 break; 5062 case Intrinsic::x86_sse_comile_ss: 5063 case Intrinsic::x86_sse2_comile_sd: 5064 Opc = X86ISD::COMI; 5065 CC = ISD::SETLE; 5066 break; 5067 case Intrinsic::x86_sse_comigt_ss: 5068 case Intrinsic::x86_sse2_comigt_sd: 5069 Opc = X86ISD::COMI; 5070 CC = ISD::SETGT; 5071 break; 5072 case Intrinsic::x86_sse_comige_ss: 5073 case Intrinsic::x86_sse2_comige_sd: 5074 Opc = X86ISD::COMI; 5075 CC = ISD::SETGE; 5076 break; 5077 case Intrinsic::x86_sse_comineq_ss: 5078 case Intrinsic::x86_sse2_comineq_sd: 5079 Opc = X86ISD::COMI; 5080 CC = ISD::SETNE; 5081 break; 5082 case Intrinsic::x86_sse_ucomieq_ss: 5083 case Intrinsic::x86_sse2_ucomieq_sd: 5084 Opc = X86ISD::UCOMI; 5085 CC = ISD::SETEQ; 5086 break; 5087 case Intrinsic::x86_sse_ucomilt_ss: 5088 case Intrinsic::x86_sse2_ucomilt_sd: 5089 Opc = X86ISD::UCOMI; 5090 CC = ISD::SETLT; 5091 break; 5092 case Intrinsic::x86_sse_ucomile_ss: 5093 case Intrinsic::x86_sse2_ucomile_sd: 5094 Opc = X86ISD::UCOMI; 5095 CC = ISD::SETLE; 5096 break; 5097 case Intrinsic::x86_sse_ucomigt_ss: 5098 case Intrinsic::x86_sse2_ucomigt_sd: 5099 Opc = X86ISD::UCOMI; 5100 CC = ISD::SETGT; 5101 break; 5102 case Intrinsic::x86_sse_ucomige_ss: 5103 case Intrinsic::x86_sse2_ucomige_sd: 5104 Opc = X86ISD::UCOMI; 5105 CC = ISD::SETGE; 5106 break; 5107 case Intrinsic::x86_sse_ucomineq_ss: 5108 case Intrinsic::x86_sse2_ucomineq_sd: 5109 Opc = X86ISD::UCOMI; 5110 CC = ISD::SETNE; 5111 break; 5112 } 5113 5114 unsigned X86CC; 5115 SDOperand LHS = Op.getOperand(1); 5116 SDOperand RHS = Op.getOperand(2); 5117 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5118 5119 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5120 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5121 DAG.getConstant(X86CC, MVT::i8), Cond); 5122 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5123 } 5124 } 5125} 5126 5127SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5128 // Depths > 0 not supported yet! 
5129 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5130 return SDOperand(); 5131 5132 // Just load the return address 5133 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5134 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5135} 5136 5137SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5138 // Depths > 0 not supported yet! 5139 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5140 return SDOperand(); 5141 5142 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5143 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5144 DAG.getIntPtrConstant(4)); 5145} 5146 5147SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5148 SelectionDAG &DAG) { 5149 // Is not yet supported on x86-64 5150 if (Subtarget->is64Bit()) 5151 return SDOperand(); 5152 5153 return DAG.getIntPtrConstant(8); 5154} 5155 5156SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5157{ 5158 assert(!Subtarget->is64Bit() && 5159 "Lowering of eh_return builtin is not supported yet on x86-64"); 5160 5161 MachineFunction &MF = DAG.getMachineFunction(); 5162 SDOperand Chain = Op.getOperand(0); 5163 SDOperand Offset = Op.getOperand(1); 5164 SDOperand Handler = Op.getOperand(2); 5165 5166 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5167 getPointerTy()); 5168 5169 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5170 DAG.getIntPtrConstant(-4UL)); 5171 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5172 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5173 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5174 MF.getRegInfo().addLiveOut(X86::ECX); 5175 5176 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5177 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5178} 5179 5180SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5181 SelectionDAG &DAG) { 5182 SDOperand Root = Op.getOperand(0); 5183 SDOperand Trmp = Op.getOperand(1); // trampoline 5184 SDOperand FPtr = Op.getOperand(2); // nested function 5185 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5186 5187 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5188 5189 const X86InstrInfo *TII = 5190 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5191 5192 if (Subtarget->is64Bit()) { 5193 SDOperand OutChains[6]; 5194 5195 // Large code-model. 5196 5197 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5198 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5199 5200 const unsigned char N86R10 = 5201 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5202 const unsigned char N86R11 = 5203 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5204 5205 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5206 5207 // Load the pointer to the nested function into R11. 5208 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5209 SDOperand Addr = Trmp; 5210 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5211 TrmpAddr, 0); 5212 5213 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5214 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5215 5216 // Load the 'nest' parameter value into R10. 
5217 // R10 is specified in X86CallingConv.td 5218 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5219 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5220 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5221 TrmpAddr, 10); 5222 5223 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5224 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5225 5226 // Jump to the nested function. 5227 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5228 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5229 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5230 TrmpAddr, 20); 5231 5232 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5233 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5234 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5235 TrmpAddr, 22); 5236 5237 SDOperand Ops[] = 5238 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5239 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5240 } else { 5241 const Function *Func = 5242 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5243 unsigned CC = Func->getCallingConv(); 5244 unsigned NestReg; 5245 5246 switch (CC) { 5247 default: 5248 assert(0 && "Unsupported calling convention"); 5249 case CallingConv::C: 5250 case CallingConv::X86_StdCall: { 5251 // Pass 'nest' parameter in ECX. 5252 // Must be kept in sync with X86CallingConv.td 5253 NestReg = X86::ECX; 5254 5255 // Check that ECX wasn't needed by an 'inreg' parameter. 5256 const FunctionType *FTy = Func->getFunctionType(); 5257 const PAListPtr &Attrs = Func->getParamAttrs(); 5258 5259 if (!Attrs.isEmpty() && !Func->isVarArg()) { 5260 unsigned InRegCount = 0; 5261 unsigned Idx = 1; 5262 5263 for (FunctionType::param_iterator I = FTy->param_begin(), 5264 E = FTy->param_end(); I != E; ++I, ++Idx) 5265 if (Attrs.paramHasAttr(Idx, ParamAttr::InReg)) 5266 // FIXME: should only count parameters that are lowered to integers. 5267 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5268 5269 if (InRegCount > 2) { 5270 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5271 abort(); 5272 } 5273 } 5274 break; 5275 } 5276 case CallingConv::X86_FastCall: 5277 // Pass 'nest' parameter in EAX. 
5278 // Must be kept in sync with X86CallingConv.td
5279 NestReg = X86::EAX;
5280 break;
5281 }
5282 
5283 SDOperand OutChains[4];
5284 SDOperand Addr, Disp;
5285 
5286 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
5287 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
5288 
5289 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
5290 const unsigned char N86Reg =
5291 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
5292 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
5293 Trmp, TrmpAddr, 0);
5294 
5295 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
5296 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);
5297 
5298 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
5299 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
5300 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
5301 TrmpAddr, 5, false, 1);
5302 
5303 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
5304 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);
5305 
5306 SDOperand Ops[] =
5307 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
5308 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
5309 }
5310 }
5311 
5312 SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
5313 /*
5314 The rounding mode is in bits 11:10 of the FP control word (FPCW, as
5315 stored by FNSTCW below), and has the following settings:
5316 00 Round to nearest
5317 01 Round to -inf
5318 10 Round to +inf
5319 11 Round to 0
5320 
5321 FLT_ROUNDS, on the other hand, expects the following:
5322 -1 Undefined
5323 0 Round to 0
5324 1 Round to nearest
5325 2 Round to +inf
5326 3 Round to -inf
5327 
5328 To perform the conversion, we do:
5329 (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
5330 */
5331 
5332 MachineFunction &MF = DAG.getMachineFunction();
5333 const TargetMachine &TM = MF.getTarget();
5334 const TargetFrameInfo &TFI = *TM.getFrameInfo();
5335 unsigned StackAlignment = TFI.getStackAlignment();
5336 MVT::ValueType VT = Op.getValueType();
5337 
5338 // Save FP Control Word to stack slot
5339 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
5340 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
5341 
5342 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
5343 DAG.getEntryNode(), StackSlot);
5344 
5345 // Load FP Control Word from stack slot
5346 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);
5347 
5348 // Transform as necessary
5349 SDOperand CWD1 =
5350 DAG.getNode(ISD::SRL, MVT::i16,
5351 DAG.getNode(ISD::AND, MVT::i16,
5352 CWD, DAG.getConstant(0x800, MVT::i16)),
5353 DAG.getConstant(11, MVT::i8));
5354 SDOperand CWD2 =
5355 DAG.getNode(ISD::SRL, MVT::i16,
5356 DAG.getNode(ISD::AND, MVT::i16,
5357 CWD, DAG.getConstant(0x400, MVT::i16)),
5358 DAG.getConstant(9, MVT::i8));
5359 
5360 SDOperand RetVal =
5361 DAG.getNode(ISD::AND, MVT::i16,
5362 DAG.getNode(ISD::ADD, MVT::i16,
5363 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
5364 DAG.getConstant(1, MVT::i16)),
5365 DAG.getConstant(3, MVT::i16));
5366 
5367 
5368 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
5369 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5370} 5371 5372SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5373 MVT::ValueType VT = Op.getValueType(); 5374 MVT::ValueType OpVT = VT; 5375 unsigned NumBits = MVT::getSizeInBits(VT); 5376 5377 Op = Op.getOperand(0); 5378 if (VT == MVT::i8) { 5379 // Zero extend to i32 since there is not an i8 bsr. 5380 OpVT = MVT::i32; 5381 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5382 } 5383 5384 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5385 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5386 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5387 5388 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5389 SmallVector<SDOperand, 4> Ops; 5390 Ops.push_back(Op); 5391 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5392 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5393 Ops.push_back(Op.getValue(1)); 5394 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5395 5396 // Finally xor with NumBits-1. 5397 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5398 5399 if (VT == MVT::i8) 5400 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5401 return Op; 5402} 5403 5404SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5405 MVT::ValueType VT = Op.getValueType(); 5406 MVT::ValueType OpVT = VT; 5407 unsigned NumBits = MVT::getSizeInBits(VT); 5408 5409 Op = Op.getOperand(0); 5410 if (VT == MVT::i8) { 5411 OpVT = MVT::i32; 5412 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5413 } 5414 5415 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5416 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5417 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5418 5419 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5420 SmallVector<SDOperand, 4> Ops; 5421 Ops.push_back(Op); 5422 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5423 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5424 Ops.push_back(Op.getValue(1)); 5425 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5426 5427 if (VT == MVT::i8) 5428 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5429 return Op; 5430} 5431 5432SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5433 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5434 unsigned Reg = 0; 5435 unsigned size = 0; 5436 switch(T) { 5437 case MVT::i8: Reg = X86::AL; size = 1; break; 5438 case MVT::i16: Reg = X86::AX; size = 2; break; 5439 case MVT::i32: Reg = X86::EAX; size = 4; break; 5440 case MVT::i64: 5441 if (Subtarget->is64Bit()) { 5442 Reg = X86::RAX; size = 8; 5443 } else //Should go away when LowerType stuff lands 5444 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5445 break; 5446 }; 5447 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5448 Op.getOperand(3), SDOperand()); 5449 SDOperand Ops[] = { cpIn.getValue(0), 5450 Op.getOperand(1), 5451 Op.getOperand(2), 5452 DAG.getTargetConstant(size, MVT::i8), 5453 cpIn.getValue(1) }; 5454 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5455 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5456 SDOperand cpOut = 5457 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5458 return cpOut; 5459} 5460 5461SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5462 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5463 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5464 SDOperand cpInL, cpInH; 5465 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5466 DAG.getConstant(0, 
MVT::i32)); 5467 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5468 DAG.getConstant(1, MVT::i32)); 5469 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5470 cpInL, SDOperand()); 5471 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5472 cpInH, cpInL.getValue(1)); 5473 SDOperand swapInL, swapInH; 5474 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5475 DAG.getConstant(0, MVT::i32)); 5476 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5477 DAG.getConstant(1, MVT::i32)); 5478 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5479 swapInL, cpInH.getValue(1)); 5480 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5481 swapInH, swapInL.getValue(1)); 5482 SDOperand Ops[] = { swapInH.getValue(0), 5483 Op->getOperand(1), 5484 swapInH.getValue(1)}; 5485 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5486 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5487 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5488 Result.getValue(1)); 5489 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5490 cpOutL.getValue(2)); 5491 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5492 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5493 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5494 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5495} 5496 5497/// LowerOperation - Provide custom lowering hooks for some operations. 5498/// 5499SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5500 switch (Op.getOpcode()) { 5501 default: assert(0 && "Should not custom lower this!"); 5502 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5503 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5504 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5505 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5506 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5507 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5508 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5509 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5510 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5511 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5512 case ISD::SHL_PARTS: 5513 case ISD::SRA_PARTS: 5514 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5515 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5516 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5517 case ISD::FABS: return LowerFABS(Op, DAG); 5518 case ISD::FNEG: return LowerFNEG(Op, DAG); 5519 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5520 case ISD::SETCC: return LowerSETCC(Op, DAG); 5521 case ISD::SELECT: return LowerSELECT(Op, DAG); 5522 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5523 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5524 case ISD::CALL: return LowerCALL(Op, DAG); 5525 case ISD::RET: return LowerRET(Op, DAG); 5526 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5527 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5528 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5529 case ISD::VASTART: return LowerVASTART(Op, DAG); 5530 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5531 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5532 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5533 case ISD::FRAMEADDR: 
return LowerFRAMEADDR(Op, DAG); 5534 case ISD::FRAME_TO_ARGS_OFFSET: 5535 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5536 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5537 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5538 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5539 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5540 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5541 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5542 5543 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5544 case ISD::READCYCLECOUNTER: 5545 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5546 } 5547} 5548 5549/// ExpandOperation - Provide custom lowering hooks for expanding operations. 5550SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5551 switch (N->getOpcode()) { 5552 default: assert(0 && "Should not custom lower this!"); 5553 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5554 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5555 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5556 } 5557} 5558 5559const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5560 switch (Opcode) { 5561 default: return NULL; 5562 case X86ISD::BSF: return "X86ISD::BSF"; 5563 case X86ISD::BSR: return "X86ISD::BSR"; 5564 case X86ISD::SHLD: return "X86ISD::SHLD"; 5565 case X86ISD::SHRD: return "X86ISD::SHRD"; 5566 case X86ISD::FAND: return "X86ISD::FAND"; 5567 case X86ISD::FOR: return "X86ISD::FOR"; 5568 case X86ISD::FXOR: return "X86ISD::FXOR"; 5569 case X86ISD::FSRL: return "X86ISD::FSRL"; 5570 case X86ISD::FILD: return "X86ISD::FILD"; 5571 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5572 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5573 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5574 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5575 case X86ISD::FLD: return "X86ISD::FLD"; 5576 case X86ISD::FST: return "X86ISD::FST"; 5577 case X86ISD::FP_GET_ST0_ST1: return "X86ISD::FP_GET_ST0_ST1"; 5578 case X86ISD::CALL: return "X86ISD::CALL"; 5579 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5580 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5581 case X86ISD::CMP: return "X86ISD::CMP"; 5582 case X86ISD::COMI: return "X86ISD::COMI"; 5583 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5584 case X86ISD::SETCC: return "X86ISD::SETCC"; 5585 case X86ISD::CMOV: return "X86ISD::CMOV"; 5586 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5587 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5588 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5589 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5590 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5591 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5592 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5593 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5594 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5595 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5596 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5597 case X86ISD::FMAX: return "X86ISD::FMAX"; 5598 case X86ISD::FMIN: return "X86ISD::FMIN"; 5599 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5600 case X86ISD::FRCP: return "X86ISD::FRCP"; 5601 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5602 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5603 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5604 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5605 case X86ISD::FNSTCW16m: return 
"X86ISD::FNSTCW16m"; 5606 case X86ISD::LCMPXCHG_DAG: return "x86ISD::LCMPXCHG_DAG"; 5607 case X86ISD::LCMPXCHG8_DAG: return "x86ISD::LCMPXCHG8_DAG"; 5608 } 5609} 5610 5611// isLegalAddressingMode - Return true if the addressing mode represented 5612// by AM is legal for this target, for a load/store of the specified type. 5613bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5614 const Type *Ty) const { 5615 // X86 supports extremely general addressing modes. 5616 5617 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5618 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5619 return false; 5620 5621 if (AM.BaseGV) { 5622 // We can only fold this if we don't need an extra load. 5623 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5624 return false; 5625 5626 // X86-64 only supports addr of globals in small code model. 5627 if (Subtarget->is64Bit()) { 5628 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5629 return false; 5630 // If lower 4G is not available, then we must use rip-relative addressing. 5631 if (AM.BaseOffs || AM.Scale > 1) 5632 return false; 5633 } 5634 } 5635 5636 switch (AM.Scale) { 5637 case 0: 5638 case 1: 5639 case 2: 5640 case 4: 5641 case 8: 5642 // These scales always work. 5643 break; 5644 case 3: 5645 case 5: 5646 case 9: 5647 // These scales are formed with basereg+scalereg. Only accept if there is 5648 // no basereg yet. 5649 if (AM.HasBaseReg) 5650 return false; 5651 break; 5652 default: // Other stuff never works. 5653 return false; 5654 } 5655 5656 return true; 5657} 5658 5659 5660bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5661 if (!Ty1->isInteger() || !Ty2->isInteger()) 5662 return false; 5663 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5664 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5665 if (NumBits1 <= NumBits2 || NumBits2 < 8) 5666 return false; 5667 return Subtarget->is64Bit() || NumBits1 < 64; 5668} 5669 5670bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5671 MVT::ValueType VT2) const { 5672 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5673 return false; 5674 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5675 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5676 if (NumBits1 <= NumBits2 || NumBits2 < 8) 5677 return false; 5678 return Subtarget->is64Bit() || NumBits1 < 64; 5679} 5680 5681/// isShuffleMaskLegal - Targets can use this to indicate that they only 5682/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5683/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5684/// are assumed to be legal. 5685bool 5686X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5687 // Only do shuffles on 128-bit vector types for now. 5688 if (MVT::getSizeInBits(VT) == 64) return false; 5689 return (Mask.Val->getNumOperands() <= 4 || 5690 isIdentityMask(Mask.Val) || 5691 isIdentityMask(Mask.Val, true) || 5692 isSplatMask(Mask.Val) || 5693 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5694 X86::isUNPCKLMask(Mask.Val) || 5695 X86::isUNPCKHMask(Mask.Val) || 5696 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5697 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5698} 5699 5700bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5701 MVT::ValueType EVT, 5702 SelectionDAG &DAG) const { 5703 unsigned NumElts = BVOps.size(); 5704 // Only do shuffles on 128-bit vector types for now. 
5705 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5706 if (NumElts == 2) return true; 5707 if (NumElts == 4) { 5708 return (isMOVLMask(&BVOps[0], 4) || 5709 isCommutedMOVL(&BVOps[0], 4, true) || 5710 isSHUFPMask(&BVOps[0], 4) || 5711 isCommutedSHUFP(&BVOps[0], 4)); 5712 } 5713 return false; 5714} 5715 5716//===----------------------------------------------------------------------===// 5717// X86 Scheduler Hooks 5718//===----------------------------------------------------------------------===// 5719 5720MachineBasicBlock * 5721X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5722 MachineBasicBlock *BB) { 5723 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5724 switch (MI->getOpcode()) { 5725 default: assert(false && "Unexpected instr type to insert"); 5726 case X86::CMOV_FR32: 5727 case X86::CMOV_FR64: 5728 case X86::CMOV_V4F32: 5729 case X86::CMOV_V2F64: 5730 case X86::CMOV_V2I64: { 5731 // To "insert" a SELECT_CC instruction, we actually have to insert the 5732 // diamond control-flow pattern. The incoming instruction knows the 5733 // destination vreg to set, the condition code register to branch on, the 5734 // true/false values to select between, and a branch opcode to use. 5735 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5736 ilist<MachineBasicBlock>::iterator It = BB; 5737 ++It; 5738 5739 // thisMBB: 5740 // ... 5741 // TrueVal = ... 5742 // cmpTY ccX, r1, r2 5743 // bCC copy1MBB 5744 // fallthrough --> copy0MBB 5745 MachineBasicBlock *thisMBB = BB; 5746 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5747 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5748 unsigned Opc = 5749 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5750 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5751 MachineFunction *F = BB->getParent(); 5752 F->getBasicBlockList().insert(It, copy0MBB); 5753 F->getBasicBlockList().insert(It, sinkMBB); 5754 // Update machine-CFG edges by first adding all successors of the current 5755 // block to the new block which will contain the Phi node for the select. 5756 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5757 e = BB->succ_end(); i != e; ++i) 5758 sinkMBB->addSuccessor(*i); 5759 // Next, remove all successors of the current block, and add the true 5760 // and fallthrough blocks as its successors. 5761 while(!BB->succ_empty()) 5762 BB->removeSuccessor(BB->succ_begin()); 5763 BB->addSuccessor(copy0MBB); 5764 BB->addSuccessor(sinkMBB); 5765 5766 // copy0MBB: 5767 // %FalseValue = ... 5768 // # fallthrough to sinkMBB 5769 BB = copy0MBB; 5770 5771 // Update machine-CFG edges 5772 BB->addSuccessor(sinkMBB); 5773 5774 // sinkMBB: 5775 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5776 // ... 5777 BB = sinkMBB; 5778 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5779 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5780 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5781 5782 delete MI; // The pseudo instruction is gone now. 5783 return BB; 5784 } 5785 5786 case X86::FP32_TO_INT16_IN_MEM: 5787 case X86::FP32_TO_INT32_IN_MEM: 5788 case X86::FP32_TO_INT64_IN_MEM: 5789 case X86::FP64_TO_INT16_IN_MEM: 5790 case X86::FP64_TO_INT32_IN_MEM: 5791 case X86::FP64_TO_INT64_IN_MEM: 5792 case X86::FP80_TO_INT16_IN_MEM: 5793 case X86::FP80_TO_INT32_IN_MEM: 5794 case X86::FP80_TO_INT64_IN_MEM: { 5795 // Change the floating point control register to use "round towards zero" 5796 // mode when truncating to an integer value. 
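// The sequence is: save the current control word with FNSTCW, overwrite it
// with 0xC7F (rounding control = round towards zero), FLDCW the new word,
// issue the FP store (IST_Fp*), then FLDCW the saved word to restore the
// original rounding mode.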
5797 MachineFunction *F = BB->getParent();
5798 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
5799 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
5800 
5801 // Load the old value of the control word into a virtual register...
5802 unsigned OldCW =
5803 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
5804 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);
5805 
5806 // Overwrite the control word in memory with 0xC7F (round towards zero)...
5807 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
5808 .addImm(0xC7F);
5809 
5810 // Reload the modified control word now...
5811 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5812 
5813 // Restore the memory image of the control word to the original value
5814 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
5815 .addReg(OldCW);
5816 
5817 // Get the X86 opcode to use.
5818 unsigned Opc;
5819 switch (MI->getOpcode()) {
5820 default: assert(0 && "illegal opcode!");
5821 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
5822 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
5823 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
5824 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
5825 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
5826 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
5827 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
5828 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
5829 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
5830 }
5831 
5832 X86AddressMode AM;
5833 MachineOperand &Op = MI->getOperand(0);
5834 if (Op.isRegister()) {
5835 AM.BaseType = X86AddressMode::RegBase;
5836 AM.Base.Reg = Op.getReg();
5837 } else {
5838 AM.BaseType = X86AddressMode::FrameIndexBase;
5839 AM.Base.FrameIndex = Op.getIndex();
5840 }
5841 Op = MI->getOperand(1);
5842 if (Op.isImmediate())
5843 AM.Scale = Op.getImm();
5844 Op = MI->getOperand(2);
5845 if (Op.isImmediate())
5846 AM.IndexReg = Op.getImm();
5847 Op = MI->getOperand(3);
5848 if (Op.isGlobalAddress()) {
5849 AM.GV = Op.getGlobal();
5850 } else {
5851 AM.Disp = Op.getImm();
5852 }
5853 addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
5854 .addReg(MI->getOperand(4).getReg());
5855 
5856 // Reload the original control word now.
5857 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5858 
5859 delete MI; // The pseudo instruction is gone now.
5860 return BB;
5861 }
5862 }
5863 }
5864 
5865 //===----------------------------------------------------------------------===//
5866 // X86 Optimization Hooks
5867 //===----------------------------------------------------------------------===//
5868 
5869 void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
5870 const APInt &Mask,
5871 APInt &KnownZero,
5872 APInt &KnownOne,
5873 const SelectionDAG &DAG,
5874 unsigned Depth) const {
5875 unsigned Opc = Op.getOpcode();
5876 assert((Opc >= ISD::BUILTIN_OP_END ||
5877 Opc == ISD::INTRINSIC_WO_CHAIN ||
5878 Opc == ISD::INTRINSIC_W_CHAIN ||
5879 Opc == ISD::INTRINSIC_VOID) &&
5880 "Should use MaskedValueIsZero if you don't know whether Op"
5881 " is a target node!");
5882 
5883 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
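// X86ISD::SETCC produces 0 or 1 in an i8 register, so every bit above the
// lowest is known to be zero.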
5884 switch (Opc) { 5885 default: break; 5886 case X86ISD::SETCC: 5887 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 5888 Mask.getBitWidth() - 1); 5889 break; 5890 } 5891} 5892 5893/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5894/// element of the result of the vector shuffle. 5895static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5896 MVT::ValueType VT = N->getValueType(0); 5897 SDOperand PermMask = N->getOperand(2); 5898 unsigned NumElems = PermMask.getNumOperands(); 5899 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5900 i %= NumElems; 5901 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5902 return (i == 0) 5903 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5904 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5905 SDOperand Idx = PermMask.getOperand(i); 5906 if (Idx.getOpcode() == ISD::UNDEF) 5907 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5908 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5909 } 5910 return SDOperand(); 5911} 5912 5913/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5914/// node is a GlobalAddress + an offset. 5915static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5916 unsigned Opc = N->getOpcode(); 5917 if (Opc == X86ISD::Wrapper) { 5918 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5919 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5920 return true; 5921 } 5922 } else if (Opc == ISD::ADD) { 5923 SDOperand N1 = N->getOperand(0); 5924 SDOperand N2 = N->getOperand(1); 5925 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5926 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5927 if (V) { 5928 Offset += V->getSignExtended(); 5929 return true; 5930 } 5931 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5932 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5933 if (V) { 5934 Offset += V->getSignExtended(); 5935 return true; 5936 } 5937 } 5938 } 5939 return false; 5940} 5941 5942/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5943/// + Dist * Size. 
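/// Both frame-index addresses and global+offset addresses are handled.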
5944static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5945 MachineFrameInfo *MFI) { 5946 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5947 return false; 5948 5949 SDOperand Loc = N->getOperand(1); 5950 SDOperand BaseLoc = Base->getOperand(1); 5951 if (Loc.getOpcode() == ISD::FrameIndex) { 5952 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5953 return false; 5954 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5955 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5956 int FS = MFI->getObjectSize(FI); 5957 int BFS = MFI->getObjectSize(BFI); 5958 if (FS != BFS || FS != Size) return false; 5959 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5960 } else { 5961 GlobalValue *GV1 = NULL; 5962 GlobalValue *GV2 = NULL; 5963 int64_t Offset1 = 0; 5964 int64_t Offset2 = 0; 5965 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5966 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5967 if (isGA1 && isGA2 && GV1 == GV2) 5968 return Offset1 == (Offset2 + Dist*Size); 5969 } 5970 5971 return false; 5972} 5973 5974static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5975 const X86Subtarget *Subtarget) { 5976 GlobalValue *GV; 5977 int64_t Offset = 0; 5978 if (isGAPlusOffset(Base, GV, Offset)) 5979 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5980 // DAG combine handles the stack object case. 5981 return false; 5982} 5983 5984 5985/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5986/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5987/// if the load addresses are consecutive, non-overlapping, and in the right 5988/// order. 5989static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5990 const X86Subtarget *Subtarget) { 5991 MachineFunction &MF = DAG.getMachineFunction(); 5992 MachineFrameInfo *MFI = MF.getFrameInfo(); 5993 MVT::ValueType VT = N->getValueType(0); 5994 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5995 SDOperand PermMask = N->getOperand(2); 5996 int NumElems = (int)PermMask.getNumOperands(); 5997 SDNode *Base = NULL; 5998 for (int i = 0; i < NumElems; ++i) { 5999 SDOperand Idx = PermMask.getOperand(i); 6000 if (Idx.getOpcode() == ISD::UNDEF) { 6001 if (!Base) return SDOperand(); 6002 } else { 6003 SDOperand Arg = 6004 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 6005 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 6006 return SDOperand(); 6007 if (!Base) 6008 Base = Arg.Val; 6009 else if (!isConsecutiveLoad(Arg.Val, Base, 6010 i, MVT::getSizeInBits(EVT)/8,MFI)) 6011 return SDOperand(); 6012 } 6013 } 6014 6015 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 6016 LoadSDNode *LD = cast<LoadSDNode>(Base); 6017 if (isAlign16) { 6018 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6019 LD->getSrcValueOffset(), LD->isVolatile()); 6020 } else { 6021 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6022 LD->getSrcValueOffset(), LD->isVolatile(), 6023 LD->getAlignment()); 6024 } 6025} 6026 6027/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 6028static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 6029 const X86Subtarget *Subtarget) { 6030 SDOperand Cond = N->getOperand(0); 6031 6032 // If we have SSE[12] support, try to form min/max nodes. 
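// select(setcc(X, Y), X, Y) becomes FMIN/FMAX where the comparison agrees
// with the semantics of minss/maxss; the remaining orderings are only
// formed under -enable-unsafe-fp-math, since minss/maxss do not distinguish
// them for NaN operands.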
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE2 support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE: // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}
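// Worked example (illustrative): with SSE2 available, the IR
//
//   %c = fcmp olt float %x, %y
//   %r = select i1 %c, float %x, float %y
//
// matches the (X olt Y) ? X : Y case above and becomes
// (X86ISD::FMIN %x, %y), which instruction selection turns into 'minss'.
// The ordered-or-equal forms (ole/ule/le) fold only under unsafe-fp-math
// because minss/maxss return a fixed operand on equal inputs, which can
// lose the distinction between +0.0 and -0.0.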
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids
  // clobbering the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
  if (MVT::isVector(St->getValue().getValueType()) &&
      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode *LdVal = St->getValue().Val;
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDOperand, 8> Ops;
    SDNode *ChainVal = St->getChain().Val;
    // Must be a store of a load.  We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).Val == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store
      // pair.
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(),
                                      Ld->isVolatile(), Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewLd);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(),
                                   Ld->getSrcValueOffset() + 4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(),
                                    St->getSrcValueOffset() + 4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}
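// Example of where these folds fire (illustrative): this file lowers fneg
// and fabs by masking the sign bit,
//
//   fneg x  ->  (X86ISD::FXOR x, sign-bit mask)
//   fabs x  ->  (X86ISD::FAND x, inverted sign-bit mask)
//
// and fcopysign lowers to an FAND/FOR combination. When later folding
// reduces one operand of an FOR/FXOR to +0.0, the node collapses to its
// other operand; the FAND fold below likewise collapses to +0.0.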
/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}


SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'f':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// lowerXConstraint - Try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
                                         std::string &s) const {
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      s = "Y";
    else if (Subtarget->hasSSE1())
      s = "x";
    else
      s = "f";
  } else
    return TargetLowering::lowerXConstraint(ConstraintVT, s);
}
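// Example (illustrative, hypothetical user code): on an SSE2 target, an
// operand with the catch-all "X" constraint, e.g.
//
//   double d;
//   __asm__("" : "=X"(d));
//
// is rewritten by lowerXConstraint to "Y", so the operand lands in an XMM
// register; with no SSE at all it is rewritten to "f" and lives on the
// x87 fp stack instead.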
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset() + C->getValue();
      } else {
        // The add is commutative; also try the constant on the left.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset() + C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we
      // can't match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}
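// Example (illustrative, hypothetical user code): for
//
//   unsigned char c;
//   __asm__("setc %0" : "=q"(c));
//
// the i8 operand with constraint 'q' gets the candidate list
// {AL, DL, CL, BL} from getRegClassForInlineAsmConstraint above, and the
// register allocator picks one of those byte registers for %0.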
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}
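// Example (illustrative): an i32 operand with the explicit register
// constraint "{ax}", e.g.
//
//   call i32 asm "inl %dx, %eax", "={ax},{dx}"(i32 %port)
//
// first resolves to (X86::AX, GR16RegisterClass) via the default lookup;
// the remapping above widens it to (X86::EAX, GR32RegisterClass) so the
// value occupies the full 32-bit register rather than degrading into the
// {ax},{dx} pair the comment before this code warns about.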