X86ISelLowering.cpp revision 5b8f82e35b51bf007de07a7ca9347d804084ddf8
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParamAttrsList.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  if (!Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LCS, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i64, Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}


MVT::ValueType
X86TargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If the value lives in an SSE register, move it from an XMM register onto
    // the fp-stack. Do this with an FP_EXTEND to f80. This will get legalized
    // into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_ST0, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes
/// that Chain/InFlag are the input chain/flag to use, and that TheCall is the
/// call being lowered.  This returns an SDNode with the same number of values
/// as the ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_ST0, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_ST0_ST1, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall calling convention seems to be standard for many Windows' API
//  routines and around. It differs from C calling convention just a little:
//  callee should clean up the stack, not caller. Symbols should be also
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on fast calling convention see Fast Calling Convention (tail call)
//  implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live in value.  It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses
/// struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call. Currently the implementation of this call is very conservative and
/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
/// virtual registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
                                                    MachineFrameInfo *MFI) {
  RegisterSDNode *OpReg = NULL;
  FrameIndexSDNode *FrameIdxNode = NULL;
  int FrameIdx = 0;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       (FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op.getOperand(1))) &&
       (MFI->isFixedObjectIndex((FrameIdx = FrameIdxNode->getIndex()))) &&
       (MFI->getObjectOffset(FrameIdx) >= 0)))
    return true;
  return false;
}

/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// to be loaded in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}


/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CopyTailCallClobberedArgumentsToVRegs - Create virtual registers for all
/// arguments to force loading and guarantee that arguments sourcing from
/// incoming parameters are not overwriting each other.
static SDOperand
CopyTailCallClobberedArgumentsToVRegs(SDOperand Chain,
    SmallVector<std::pair<unsigned, SDOperand>, 8> &TailCallClobberedVRegs,
    SelectionDAG &DAG,
    MachineFunction &MF,
    const TargetLowering *TL) {

  SDOperand InFlag;
  for (unsigned i = 0, e = TailCallClobberedVRegs.size(); i != e; i++) {
    SDOperand Arg = TailCallClobberedVRegs[i].second;
    unsigned Idx = TailCallClobberedVRegs[i].first;
    unsigned VReg =
      MF.getRegInfo().createVirtualRegister(TL->getRegClassFor(Arg.getValueType()));
    Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
    InFlag = Chain.getValue(1);
    Arg = DAG.getCopyFromReg(Chain, VReg, Arg.getValueType(), InFlag);
    TailCallClobberedVRegs[i] = std::make_pair(Idx, Arg);
    Chain = Arg.getValue(1);
    InFlag = Arg.getValue(2);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ParamFlags::ParamFlagsTy Flags,
                          SelectionDAG &DAG) {
  unsigned Align = ISD::ParamFlags::One <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ParamFlags::ParamFlagsTy Flags =
    cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isByVal = Flags & ISD::ParamFlags::ByVal;
  bool isImmutable = !AlwaysUseMutable && !isByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization, mark all
  // arguments mutable, since they could be overwritten by the lowering of
  // arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
1303 VarArgsGPOffset = NumIntRegs * 8; 1304 VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16; 1305 RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16); 1306 1307 // Store the integer parameter registers. 1308 SmallVector<SDOperand, 8> MemOps; 1309 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 1310 SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1311 DAG.getIntPtrConstant(VarArgsGPOffset)); 1312 for (; NumIntRegs != 6; ++NumIntRegs) { 1313 unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], 1314 X86::GR64RegisterClass); 1315 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1316 SDOperand Store = 1317 DAG.getStore(Val.getValue(1), Val, FIN, 1318 PseudoSourceValue::getFixedStack(), 1319 RegSaveFrameIndex); 1320 MemOps.push_back(Store); 1321 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1322 DAG.getIntPtrConstant(8)); 1323 } 1324 1325 // Now store the XMM (fp + vector) parameter registers. 1326 FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1327 DAG.getIntPtrConstant(VarArgsFPOffset)); 1328 for (; NumXMMRegs != 8; ++NumXMMRegs) { 1329 unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], 1330 X86::VR128RegisterClass); 1331 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); 1332 SDOperand Store = 1333 DAG.getStore(Val.getValue(1), Val, FIN, 1334 PseudoSourceValue::getFixedStack(), 1335 RegSaveFrameIndex); 1336 MemOps.push_back(Store); 1337 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1338 DAG.getIntPtrConstant(16)); 1339 } 1340 if (!MemOps.empty()) 1341 Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 1342 &MemOps[0], MemOps.size()); 1343 } 1344 } 1345 1346 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1347 // arguments and the arguments after the retaddr has been pushed are 1348 // aligned. 1349 if (!Is64Bit && CC == CallingConv::X86_FastCall && 1350 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() && 1351 (StackSize & 7) == 0) 1352 StackSize += 4; 1353 1354 ArgValues.push_back(Root); 1355 1356 // Some CCs need callee pop. 1357 if (IsCalleePop(Op)) { 1358 BytesToPopOnReturn = StackSize; // Callee pops everything. 1359 BytesCallerReserves = 0; 1360 } else { 1361 BytesToPopOnReturn = 0; // Callee pops nothing. 1362 // If this is an sret function, the return should pop the hidden pointer. 1363 if (!Is64Bit && ArgsAreStructReturn(Op)) 1364 BytesToPopOnReturn = 4; 1365 BytesCallerReserves = StackSize; 1366 } 1367 1368 if (!Is64Bit) { 1369 RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only. 1370 if (CC == CallingConv::X86_FastCall) 1371 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. 1372 } 1373 1374 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); 1375 1376 // Return the new list of results. 
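  // The MERGE_VALUES node below produces one result per incoming argument plus
  // the updated token chain (Root was appended as the last element of
  // ArgValues); Op.ResNo selects the value the original node's use refers to.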
1377 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 1378 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo); 1379} 1380 1381SDOperand 1382X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, 1383 const SDOperand &StackPtr, 1384 const CCValAssign &VA, 1385 SDOperand Chain, 1386 SDOperand Arg) { 1387 unsigned LocMemOffset = VA.getLocMemOffset(); 1388 SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1389 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1390 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1391 ISD::ParamFlags::ParamFlagsTy Flags = 1392 cast<ConstantSDNode>(FlagsOp)->getValue(); 1393 if (Flags & ISD::ParamFlags::ByVal) { 1394 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG); 1395 } 1396 return DAG.getStore(Chain, Arg, PtrOff, 1397 PseudoSourceValue::getStack(), LocMemOffset); 1398} 1399 1400/// ClassifyX86_64SRetCallReturn - Classify how to implement a x86-64 1401/// struct return call to the specified function. X86-64 ABI specifies 1402/// some SRet calls are actually returned in registers. Since current 1403/// LLVM cannot represent multi-value calls, they are represent as 1404/// calls where the results are passed in a hidden struct provided by 1405/// the caller. This function examines the type of the struct to 1406/// determine the correct way to implement the call. 1407X86::X86_64SRet 1408X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) { 1409 // FIXME: Disabled for now. 1410 return X86::InMemory; 1411 1412 const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType()); 1413 const Type *RTy = PTy->getElementType(); 1414 unsigned Size = getTargetData()->getABITypeSize(RTy); 1415 if (Size != 16 && Size != 32) 1416 return X86::InMemory; 1417 1418 if (Size == 32) { 1419 const StructType *STy = dyn_cast<StructType>(RTy); 1420 if (!STy) return X86::InMemory; 1421 if (STy->getNumElements() == 2 && 1422 STy->getElementType(0) == Type::X86_FP80Ty && 1423 STy->getElementType(1) == Type::X86_FP80Ty) 1424 return X86::InX87; 1425 } 1426 1427 bool AllFP = true; 1428 for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end(); 1429 I != E; ++I) { 1430 const Type *STy = I->get(); 1431 if (!STy->isFPOrFPVector()) { 1432 AllFP = false; 1433 break; 1434 } 1435 } 1436 1437 if (AllFP) 1438 return X86::InSSE; 1439 return X86::InGPR64; 1440} 1441 1442void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall, 1443 CCAssignFn *Fn, 1444 CCState &CCInfo) { 1445 unsigned NumOps = (TheCall->getNumOperands() - 5) / 2; 1446 for (unsigned i = 1; i != NumOps; ++i) { 1447 MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType(); 1448 SDOperand FlagOp = TheCall->getOperand(5+2*i+1); 1449 unsigned ArgFlags =cast<ConstantSDNode>(FlagOp)->getValue(); 1450 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) { 1451 cerr << "Call operand #" << i << " has unhandled type " 1452 << MVT::getValueTypeString(ArgVT) << "\n"; 1453 abort(); 1454 } 1455 } 1456} 1457 1458SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 1459 MachineFunction &MF = DAG.getMachineFunction(); 1460 MachineFrameInfo * MFI = MF.getFrameInfo(); 1461 SDOperand Chain = Op.getOperand(0); 1462 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 1463 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1464 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 1465 && CC == CallingConv::Fast && PerformTailCallOpt; 1466 SDOperand 
Callee = Op.getOperand(4); 1467 bool Is64Bit = Subtarget->is64Bit(); 1468 bool IsStructRet = CallIsStructReturn(Op); 1469 1470 assert(!(isVarArg && CC == CallingConv::Fast) && 1471 "Var args not supported with calling convention fastcc"); 1472 1473 // Analyze operands of the call, assigning locations to each operand. 1474 SmallVector<CCValAssign, 16> ArgLocs; 1475 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1476 CCAssignFn *CCFn = CCAssignFnForNode(Op); 1477 1478 X86::X86_64SRet SRetMethod = X86::InMemory; 1479 if (Is64Bit && IsStructRet) 1480 // FIXME: We can't figure out type of the sret structure for indirect 1481 // calls. We need to copy more information from CallSite to the ISD::CALL 1482 // node. 1483 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1484 SRetMethod = 1485 ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal())); 1486 1487 // UGLY HACK! For x86-64, some 128-bit aggregates are returns in a pair of 1488 // registers. Unfortunately, llvm does not support i128 yet so we pretend it's 1489 // a sret call. 1490 if (SRetMethod != X86::InMemory) 1491 X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo); 1492 else 1493 CCInfo.AnalyzeCallOperands(Op.Val, CCFn); 1494 1495 // Get a count of how many bytes are to be pushed on the stack. 1496 unsigned NumBytes = CCInfo.getNextStackOffset(); 1497 if (CC == CallingConv::Fast) 1498 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1499 1500 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1501 // arguments and the arguments after the retaddr has been pushed are aligned. 1502 if (!Is64Bit && CC == CallingConv::X86_FastCall && 1503 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() && 1504 (NumBytes & 7) == 0) 1505 NumBytes += 4; 1506 1507 int FPDiff = 0; 1508 if (IsTailCall) { 1509 // Lower arguments at fp - stackoffset + fpdiff. 1510 unsigned NumBytesCallerPushed = 1511 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1512 FPDiff = NumBytesCallerPushed - NumBytes; 1513 1514 // Set the delta of movement of the returnaddr stackslot. 1515 // But only set if delta is greater than previous delta. 1516 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1517 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1518 } 1519 1520 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); 1521 1522 SDOperand RetAddrFrIdx; 1523 if (IsTailCall) { 1524 // Adjust the Return address stack slot. 1525 if (FPDiff) { 1526 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1527 RetAddrFrIdx = getReturnAddressFrameIndex(DAG); 1528 // Load the "old" Return address. 1529 RetAddrFrIdx = 1530 DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0); 1531 Chain = SDOperand(RetAddrFrIdx.Val, 1); 1532 } 1533 } 1534 1535 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1536 SmallVector<std::pair<unsigned, SDOperand>, 8> TailCallClobberedVRegs; 1537 SmallVector<SDOperand, 8> MemOpChains; 1538 1539 SDOperand StackPtr; 1540 1541 // Walk the register/memloc assignments, inserting copies/loads. For tail 1542 // calls, remember all arguments for later special lowering. 1543 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1544 CCValAssign &VA = ArgLocs[i]; 1545 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1546 1547 // Promote the value if needed. 
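    // For example, an i8 argument assigned to a 32-bit register location is
    // widened here with SIGN_EXTEND or ZERO_EXTEND (typically when the
    // argument carries a signext/zeroext attribute), or with ANY_EXTEND,
    // which leaves the upper bits undefined.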
1548 switch (VA.getLocInfo()) { 1549 default: assert(0 && "Unknown loc info!"); 1550 case CCValAssign::Full: break; 1551 case CCValAssign::SExt: 1552 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1553 break; 1554 case CCValAssign::ZExt: 1555 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1556 break; 1557 case CCValAssign::AExt: 1558 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1559 break; 1560 } 1561 1562 if (VA.isRegLoc()) { 1563 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1564 } else { 1565 if (!IsTailCall) { 1566 assert(VA.isMemLoc()); 1567 if (StackPtr.Val == 0) 1568 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1569 1570 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1571 Arg)); 1572 } else if (IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) { 1573 TailCallClobberedVRegs.push_back(std::make_pair(i,Arg)); 1574 } 1575 } 1576 } 1577 1578 if (!MemOpChains.empty()) 1579 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1580 &MemOpChains[0], MemOpChains.size()); 1581 1582 // Build a sequence of copy-to-reg nodes chained together with token chain 1583 // and flag operands which copy the outgoing args into registers. 1584 SDOperand InFlag; 1585 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1586 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1587 InFlag); 1588 InFlag = Chain.getValue(1); 1589 } 1590 1591 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1592 // GOT pointer. 1593 if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) { 1594 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1595 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1596 InFlag); 1597 InFlag = Chain.getValue(1); 1598 } 1599 // If we are tail calling and generating PIC/GOT style code load the address 1600 // of the callee into ecx. The value in ecx is used as target of the tail 1601 // jump. This is done to circumvent the ebx/callee-saved problem for tail 1602 // calls on PIC/GOT architectures. Normally we would just put the address of 1603 // GOT into ebx and then call target@PLT. But for tail callss ebx would be 1604 // restored (since ebx is callee saved) before jumping to the target@PLT. 1605 if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) { 1606 // Note: The actual moving to ecx is done further down. 1607 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 1608 if (G && !G->getGlobal()->hasHiddenVisibility() && 1609 !G->getGlobal()->hasProtectedVisibility()) 1610 Callee = LowerGlobalAddress(Callee, DAG); 1611 else if (isa<ExternalSymbolSDNode>(Callee)) 1612 Callee = LowerExternalSymbol(Callee,DAG); 1613 } 1614 1615 if (Is64Bit && isVarArg) { 1616 // From AMD64 ABI document: 1617 // For calls that may call functions that use varargs or stdargs 1618 // (prototype-less calls or calls to functions containing ellipsis (...) in 1619 // the declaration) %al is used as hidden argument to specify the number 1620 // of SSE registers used. The contents of %al do not need to match exactly 1621 // the number of registers, but must be an ubound on the number of SSE 1622 // registers used and is in the range 0 - 8 inclusive. 1623 1624 // Count the number of XMM registers allocated. 
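    // For example, a varargs call that passes two doubles in XMM0 and XMM1
    // copies the value 2 into AL below; per the ABI text quoted above, any
    // upper bound in the range 0-8 on the number of XMM registers used would
    // also be acceptable.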
1625 static const unsigned XMMArgRegs[] = { 1626 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1627 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1628 }; 1629 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1630 1631 Chain = DAG.getCopyToReg(Chain, X86::AL, 1632 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1633 InFlag = Chain.getValue(1); 1634 } 1635 1636 1637 // For tail calls lower the arguments to the 'real' stack slot. 1638 if (IsTailCall) { 1639 SmallVector<SDOperand, 8> MemOpChains2; 1640 SDOperand FIN; 1641 int FI = 0; 1642 // Do not flag preceeding copytoreg stuff together with the following stuff. 1643 InFlag = SDOperand(); 1644 1645 Chain = CopyTailCallClobberedArgumentsToVRegs(Chain, TailCallClobberedVRegs, 1646 DAG, MF, this); 1647 1648 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1649 CCValAssign &VA = ArgLocs[i]; 1650 if (!VA.isRegLoc()) { 1651 assert(VA.isMemLoc()); 1652 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1653 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1654 ISD::ParamFlags::ParamFlagsTy Flags = 1655 cast<ConstantSDNode>(FlagsOp)->getValue(); 1656 // Create frame index. 1657 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1658 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1659 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1660 FIN = DAG.getFrameIndex(FI, MVT::i32); 1661 1662 // Find virtual register for this argument. 1663 bool Found=false; 1664 for (unsigned idx=0, e= TailCallClobberedVRegs.size(); idx < e; idx++) 1665 if (TailCallClobberedVRegs[idx].first==i) { 1666 Arg = TailCallClobberedVRegs[idx].second; 1667 Found=true; 1668 break; 1669 } 1670 assert(IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)==false || 1671 (Found==true && "No corresponding Argument was found")); 1672 1673 if (Flags & ISD::ParamFlags::ByVal) { 1674 // Copy relative to framepointer. 1675 MemOpChains2.push_back(CreateCopyOfByValArgument(Arg, FIN, Chain, 1676 Flags, DAG)); 1677 } else { 1678 // Store relative to framepointer. 1679 MemOpChains2.push_back( 1680 DAG.getStore(Chain, Arg, FIN, 1681 PseudoSourceValue::getFixedStack(), FI)); 1682 } 1683 } 1684 } 1685 1686 if (!MemOpChains2.empty()) 1687 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1688 &MemOpChains2[0], MemOpChains2.size()); 1689 1690 // Store the return address to the appropriate stack slot. 1691 if (FPDiff) { 1692 // Calculate the new stack slot for the return address. 1693 int SlotSize = Is64Bit ? 8 : 4; 1694 int NewReturnAddrFI = 1695 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1696 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1697 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1698 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, 1699 PseudoSourceValue::getFixedStack(), NewReturnAddrFI); 1700 } 1701 } 1702 1703 // If the callee is a GlobalAddress node (quite common, every direct call is) 1704 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1705 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1706 // We should use extra load for direct calls to dllimported functions in 1707 // non-JIT mode. 
1708 if ((IsTailCall || !Is64Bit || 1709 getTargetMachine().getCodeModel() != CodeModel::Large) 1710 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1711 getTargetMachine(), true)) 1712 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1713 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1714 if (IsTailCall || !Is64Bit || 1715 getTargetMachine().getCodeModel() != CodeModel::Large) 1716 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1717 } else if (IsTailCall) { 1718 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1719 1720 Chain = DAG.getCopyToReg(Chain, 1721 DAG.getRegister(Opc, getPointerTy()), 1722 Callee,InFlag); 1723 Callee = DAG.getRegister(Opc, getPointerTy()); 1724 // Add register as live out. 1725 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1726 } 1727 1728 // Returns a chain & a flag for retval copy to use. 1729 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1730 SmallVector<SDOperand, 8> Ops; 1731 1732 if (IsTailCall) { 1733 Ops.push_back(Chain); 1734 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1735 Ops.push_back(DAG.getIntPtrConstant(0)); 1736 if (InFlag.Val) 1737 Ops.push_back(InFlag); 1738 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1739 InFlag = Chain.getValue(1); 1740 1741 // Returns a chain & a flag for retval copy to use. 1742 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1743 Ops.clear(); 1744 } 1745 1746 Ops.push_back(Chain); 1747 Ops.push_back(Callee); 1748 1749 if (IsTailCall) 1750 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1751 1752 // Add an implicit use GOT pointer in EBX. 1753 if (!IsTailCall && !Is64Bit && 1754 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1755 Subtarget->isPICStyleGOT()) 1756 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1757 1758 // Add argument registers to the end of the list so that they are known live 1759 // into the call. 1760 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1761 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1762 RegsToPass[i].second.getValueType())); 1763 1764 if (InFlag.Val) 1765 Ops.push_back(InFlag); 1766 1767 if (IsTailCall) { 1768 assert(InFlag.Val && 1769 "Flag must be set. Depend on flag being set in LowerRET"); 1770 Chain = DAG.getNode(X86ISD::TAILCALL, 1771 Op.Val->getVTList(), &Ops[0], Ops.size()); 1772 1773 return SDOperand(Chain.Val, Op.ResNo); 1774 } 1775 1776 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1777 InFlag = Chain.getValue(1); 1778 1779 // Create the CALLSEQ_END node. 1780 unsigned NumBytesForCalleeToPush; 1781 if (IsCalleePop(Op)) 1782 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 1783 else if (!Is64Bit && IsStructRet) 1784 // If this is is a call to a struct-return function, the callee 1785 // pops the hidden struct pointer, so we have to push it back. 1786 // This is common for Darwin/X86, Linux & Mingw32 targets. 1787 NumBytesForCalleeToPush = 4; 1788 else 1789 NumBytesForCalleeToPush = 0; // Callee pops nothing. 1790 1791 // Returns a flag for retval copy to use. 1792 Chain = DAG.getCALLSEQ_END(Chain, 1793 DAG.getIntPtrConstant(NumBytes), 1794 DAG.getIntPtrConstant(NumBytesForCalleeToPush), 1795 InFlag); 1796 InFlag = Chain.getValue(1); 1797 1798 // Handle result values, copying them out of physregs into vregs that we 1799 // return. 
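  // The common case (SRetMethod == X86::InMemory, currently forced by the
  // disabled classification above) goes through LowerCallResult.  The other
  // cases handle the 128-bit sret aggregates classified earlier: InGPR64
  // returns in RAX:RDX, InSSE in XMM0:XMM1, and InX87 in a pair of x87
  // registers.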
1800  switch (SRetMethod) {
1801  default:
1802    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1803  case X86::InGPR64:
1804    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1805                                                   X86::RAX, X86::RDX,
1806                                                   MVT::i64, DAG), Op.ResNo);
1807  case X86::InSSE:
1808    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1809                                                   X86::XMM0, X86::XMM1,
1810                                                   MVT::f64, DAG), Op.ResNo);
1811  case X86::InX87:
1812    return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
1813                     Op.ResNo);
1814  }
1815}
1816
1817
1818//===----------------------------------------------------------------------===//
1819//                Fast Calling Convention (tail call) implementation
1820//===----------------------------------------------------------------------===//
1821
1822//  Like a std call, the callee cleans up its arguments; the convention differs
1823//  in that ECX is reserved for storing the tail-called function's address.
1824//  Only 2 registers are free for argument passing (inreg). Tail call
1825//  optimization is performed provided:
1826//                * tailcallopt is enabled
1827//                * caller/callee are fastcc
1828//  On the X86_64 architecture with GOT-style position-independent code, only
1829//  local (within-module) calls are supported at the moment.
1830//  To keep the stack aligned according to the platform ABI, the function
1831//  GetAlignedArgumentStackSize ensures that the argument delta is always a
1832//  multiple of the stack alignment. (Dynamic linkers need this - Darwin's
1833//  dyld, for example.) If the tail-called callee has more arguments than the
1834//  caller, the caller needs to make sure that there is room to move the
1835//  RETADDR to. This is achieved by reserving an area the size of the argument
1836//  delta right after the original RETADDR, but before the saved frame pointer
1837//  or the spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1838//  stack layout:
1839//    arg1
1840//    arg2
1841//    RETADDR
1842//    [ new RETADDR
1843//      move area ]
1844//    (possible EBP)
1845//    ESI
1846//    EDI
1847//    local1 ..
1848
1849/// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12 bytes
1850/// for a 16 byte alignment requirement, so that pushing the return address keeps the stack aligned.
1851unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1852                                                        SelectionDAG& DAG) {
1853  if (PerformTailCallOpt) {
1854    MachineFunction &MF = DAG.getMachineFunction();
1855    const TargetMachine &TM = MF.getTarget();
1856    const TargetFrameInfo &TFI = *TM.getFrameInfo();
1857    unsigned StackAlignment = TFI.getStackAlignment();
1858    uint64_t AlignMask = StackAlignment - 1;
1859    int64_t Offset = StackSize;
1860    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1861    if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
1862      // Misalignment is at most StackAlignment - SlotSize (e.g. 12); just add the difference.
1863      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1864    } else {
1865      // Mask out the lower bits, add StackAlignment once plus StackAlignment - SlotSize (e.g. 12) bytes.
1866      Offset = ((~AlignMask) & Offset) + StackAlignment +
1867        (StackAlignment-SlotSize);
1868    }
1869    StackSize = Offset;
1870  }
1871  return StackSize;
1872}
1873
1874/// IsEligibleForTailCallOptimization - Check to see whether the next instruction
1875/// following the call is a return. A function is eligible if caller/callee
1876/// calling conventions match (currently only fastcc supports tail calls), and
1877/// the function CALL is immediately followed by a RET.
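/// For example, a fastcc caller whose body ends in
///   %res = tail call fastcc i32 @callee(i32 %x)
///   ret i32 %res
/// qualifies when tail call optimization (PerformTailCallOpt) is enabled; the
/// RET must consume the CALL's result directly (or just its chain for a void
/// return), which is what the operand checks below verify.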
1878bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, 1879 SDOperand Ret, 1880 SelectionDAG& DAG) const { 1881 if (!PerformTailCallOpt) 1882 return false; 1883 1884 // Check whether CALL node immediatly preceeds the RET node and whether the 1885 // return uses the result of the node or is a void return. 1886 unsigned NumOps = Ret.getNumOperands(); 1887 if ((NumOps == 1 && 1888 (Ret.getOperand(0) == SDOperand(Call.Val,1) || 1889 Ret.getOperand(0) == SDOperand(Call.Val,0))) || 1890 (NumOps > 1 && 1891 Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) && 1892 Ret.getOperand(1) == SDOperand(Call.Val,0))) { 1893 MachineFunction &MF = DAG.getMachineFunction(); 1894 unsigned CallerCC = MF.getFunction()->getCallingConv(); 1895 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue(); 1896 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 1897 SDOperand Callee = Call.getOperand(4); 1898 // On x86/32Bit PIC/GOT tail calls are supported. 1899 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ || 1900 !Subtarget->isPICStyleGOT()|| !Subtarget->is64Bit()) 1901 return true; 1902 1903 // Can only do local tail calls (in same module, hidden or protected) on 1904 // x86_64 PIC/GOT at the moment. 1905 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1906 return G->getGlobal()->hasHiddenVisibility() 1907 || G->getGlobal()->hasProtectedVisibility(); 1908 } 1909 } 1910 1911 return false; 1912} 1913 1914//===----------------------------------------------------------------------===// 1915// Other Lowering Hooks 1916//===----------------------------------------------------------------------===// 1917 1918 1919SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 1920 MachineFunction &MF = DAG.getMachineFunction(); 1921 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1922 int ReturnAddrIndex = FuncInfo->getRAIndex(); 1923 1924 if (ReturnAddrIndex == 0) { 1925 // Set up a frame object for the return address. 1926 if (Subtarget->is64Bit()) 1927 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8); 1928 else 1929 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4); 1930 1931 FuncInfo->setRAIndex(ReturnAddrIndex); 1932 } 1933 1934 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 1935} 1936 1937 1938 1939/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86 1940/// specific condition code. It returns a false if it cannot do a direct 1941/// translation. X86CC is the translated CondCode. LHS/RHS are modified as 1942/// needed. 1943static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 1944 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, 1945 SelectionDAG &DAG) { 1946 X86CC = X86::COND_INVALID; 1947 if (!isFP) { 1948 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 1949 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 1950 // X > -1 -> X == 0, jump !sign. 1951 RHS = DAG.getConstant(0, RHS.getValueType()); 1952 X86CC = X86::COND_NS; 1953 return true; 1954 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 1955 // X < 0 -> X == 0, jump on sign. 
1956        X86CC = X86::COND_S;
1957        return true;
1958      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1959        // X < 1 -> X <= 0
1960        RHS = DAG.getConstant(0, RHS.getValueType());
1961        X86CC = X86::COND_LE;
1962        return true;
1963      }
1964    }
1965
1966    switch (SetCCOpcode) {
1967    default: break;
1968    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
1969    case ISD::SETGT:  X86CC = X86::COND_G;  break;
1970    case ISD::SETGE:  X86CC = X86::COND_GE; break;
1971    case ISD::SETLT:  X86CC = X86::COND_L;  break;
1972    case ISD::SETLE:  X86CC = X86::COND_LE; break;
1973    case ISD::SETNE:  X86CC = X86::COND_NE; break;
1974    case ISD::SETULT: X86CC = X86::COND_B;  break;
1975    case ISD::SETUGT: X86CC = X86::COND_A;  break;
1976    case ISD::SETULE: X86CC = X86::COND_BE; break;
1977    case ISD::SETUGE: X86CC = X86::COND_AE; break;
1978    }
1979  } else {
1980    // On a floating point condition, the flags are set as follows:
1981    //  ZF  PF  CF   op
1982    //   0 | 0 | 0 | X > Y
1983    //   0 | 0 | 1 | X < Y
1984    //   1 | 0 | 0 | X == Y
1985    //   1 | 1 | 1 | unordered
1986    bool Flip = false;
1987    switch (SetCCOpcode) {
1988    default: break;
1989    case ISD::SETUEQ:
1990    case ISD::SETEQ: X86CC = X86::COND_E; break;
1991    case ISD::SETOLT: Flip = true; // Fallthrough
1992    case ISD::SETOGT:
1993    case ISD::SETGT: X86CC = X86::COND_A; break;
1994    case ISD::SETOLE: Flip = true; // Fallthrough
1995    case ISD::SETOGE:
1996    case ISD::SETGE: X86CC = X86::COND_AE; break;
1997    case ISD::SETUGT: Flip = true; // Fallthrough
1998    case ISD::SETULT:
1999    case ISD::SETLT: X86CC = X86::COND_B; break;
2000    case ISD::SETUGE: Flip = true; // Fallthrough
2001    case ISD::SETULE:
2002    case ISD::SETLE: X86CC = X86::COND_BE; break;
2003    case ISD::SETONE:
2004    case ISD::SETNE: X86CC = X86::COND_NE; break;
2005    case ISD::SETUO: X86CC = X86::COND_P; break;
2006    case ISD::SETO:  X86CC = X86::COND_NP; break;
2007    }
2008    if (Flip)
2009      std::swap(LHS, RHS);
2010  }
2011
2012  return X86CC != X86::COND_INVALID;
2013}
2014
2015/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
2016/// code? The current x86 ISA includes the following FP cmov instructions:
2017/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2018static bool hasFPCMov(unsigned X86CC) {
2019  switch (X86CC) {
2020  default:
2021    return false;
2022  case X86::COND_B:
2023  case X86::COND_BE:
2024  case X86::COND_E:
2025  case X86::COND_P:
2026  case X86::COND_A:
2027  case X86::COND_AE:
2028  case X86::COND_NE:
2029  case X86::COND_NP:
2030    return true;
2031  }
2032}
2033
2034/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode.  Return
2035/// true if Op is undef or if its value falls within the specified range [Low, Hi).
2036static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
2037  if (Op.getOpcode() == ISD::UNDEF)
2038    return true;
2039
2040  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
2041  return (Val >= Low && Val < Hi);
2042}
2043
2044/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode.  Return
2045/// true if Op is undef or if its value is equal to the specified value.
2046static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
2047  if (Op.getOpcode() == ISD::UNDEF)
2048    return true;
2049  return cast<ConstantSDNode>(Op)->getValue() == Val;
2050}
2051
2052/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
2053/// specifies a shuffle of elements that is suitable for input to PSHUFD.
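/// For example, the 4-element masks <2, 1, 3, 0> and <0, 0, 0, 0> qualify
/// (every index refers to the first vector), while <0, 1, 4, 5> does not.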
2054bool X86::isPSHUFDMask(SDNode *N) { 2055 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2056 2057 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 2058 return false; 2059 2060 // Check if the value doesn't reference the second vector. 2061 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2062 SDOperand Arg = N->getOperand(i); 2063 if (Arg.getOpcode() == ISD::UNDEF) continue; 2064 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2065 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 2066 return false; 2067 } 2068 2069 return true; 2070} 2071 2072/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 2073/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 2074bool X86::isPSHUFHWMask(SDNode *N) { 2075 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2076 2077 if (N->getNumOperands() != 8) 2078 return false; 2079 2080 // Lower quadword copied in order. 2081 for (unsigned i = 0; i != 4; ++i) { 2082 SDOperand Arg = N->getOperand(i); 2083 if (Arg.getOpcode() == ISD::UNDEF) continue; 2084 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2085 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2086 return false; 2087 } 2088 2089 // Upper quadword shuffled. 2090 for (unsigned i = 4; i != 8; ++i) { 2091 SDOperand Arg = N->getOperand(i); 2092 if (Arg.getOpcode() == ISD::UNDEF) continue; 2093 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2094 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2095 if (Val < 4 || Val > 7) 2096 return false; 2097 } 2098 2099 return true; 2100} 2101 2102/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2103/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2104bool X86::isPSHUFLWMask(SDNode *N) { 2105 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2106 2107 if (N->getNumOperands() != 8) 2108 return false; 2109 2110 // Upper quadword copied in order. 2111 for (unsigned i = 4; i != 8; ++i) 2112 if (!isUndefOrEqual(N->getOperand(i), i)) 2113 return false; 2114 2115 // Lower quadword shuffled. 2116 for (unsigned i = 0; i != 4; ++i) 2117 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2118 return false; 2119 2120 return true; 2121} 2122 2123/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2124/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2125static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2126 if (NumElems != 2 && NumElems != 4) return false; 2127 2128 unsigned Half = NumElems / 2; 2129 for (unsigned i = 0; i < Half; ++i) 2130 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2131 return false; 2132 for (unsigned i = Half; i < NumElems; ++i) 2133 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2134 return false; 2135 2136 return true; 2137} 2138 2139bool X86::isSHUFPMask(SDNode *N) { 2140 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2141 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2142} 2143 2144/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2145/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2146/// half elements to come from vector 1 (which would equal the dest.) and 2147/// the upper half to come from vector 2. 
2148static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2149 if (NumOps != 2 && NumOps != 4) return false; 2150 2151 unsigned Half = NumOps / 2; 2152 for (unsigned i = 0; i < Half; ++i) 2153 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2154 return false; 2155 for (unsigned i = Half; i < NumOps; ++i) 2156 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2157 return false; 2158 return true; 2159} 2160 2161static bool isCommutedSHUFP(SDNode *N) { 2162 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2163 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2164} 2165 2166/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2167/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2168bool X86::isMOVHLPSMask(SDNode *N) { 2169 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2170 2171 if (N->getNumOperands() != 4) 2172 return false; 2173 2174 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2175 return isUndefOrEqual(N->getOperand(0), 6) && 2176 isUndefOrEqual(N->getOperand(1), 7) && 2177 isUndefOrEqual(N->getOperand(2), 2) && 2178 isUndefOrEqual(N->getOperand(3), 3); 2179} 2180 2181/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2182/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2183/// <2, 3, 2, 3> 2184bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2185 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2186 2187 if (N->getNumOperands() != 4) 2188 return false; 2189 2190 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2191 return isUndefOrEqual(N->getOperand(0), 2) && 2192 isUndefOrEqual(N->getOperand(1), 3) && 2193 isUndefOrEqual(N->getOperand(2), 2) && 2194 isUndefOrEqual(N->getOperand(3), 3); 2195} 2196 2197/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2198/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2199bool X86::isMOVLPMask(SDNode *N) { 2200 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2201 2202 unsigned NumElems = N->getNumOperands(); 2203 if (NumElems != 2 && NumElems != 4) 2204 return false; 2205 2206 for (unsigned i = 0; i < NumElems/2; ++i) 2207 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2208 return false; 2209 2210 for (unsigned i = NumElems/2; i < NumElems; ++i) 2211 if (!isUndefOrEqual(N->getOperand(i), i)) 2212 return false; 2213 2214 return true; 2215} 2216 2217/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2218/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2219/// and MOVLHPS. 2220bool X86::isMOVHPMask(SDNode *N) { 2221 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2222 2223 unsigned NumElems = N->getNumOperands(); 2224 if (NumElems != 2 && NumElems != 4) 2225 return false; 2226 2227 for (unsigned i = 0; i < NumElems/2; ++i) 2228 if (!isUndefOrEqual(N->getOperand(i), i)) 2229 return false; 2230 2231 for (unsigned i = 0; i < NumElems/2; ++i) { 2232 SDOperand Arg = N->getOperand(i + NumElems/2); 2233 if (!isUndefOrEqual(Arg, i + NumElems)) 2234 return false; 2235 } 2236 2237 return true; 2238} 2239 2240/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2241/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
2242bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2243 bool V2IsSplat = false) { 2244 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2245 return false; 2246 2247 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2248 SDOperand BitI = Elts[i]; 2249 SDOperand BitI1 = Elts[i+1]; 2250 if (!isUndefOrEqual(BitI, j)) 2251 return false; 2252 if (V2IsSplat) { 2253 if (isUndefOrEqual(BitI1, NumElts)) 2254 return false; 2255 } else { 2256 if (!isUndefOrEqual(BitI1, j + NumElts)) 2257 return false; 2258 } 2259 } 2260 2261 return true; 2262} 2263 2264bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2265 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2266 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2267} 2268 2269/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2270/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2271bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2272 bool V2IsSplat = false) { 2273 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2274 return false; 2275 2276 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2277 SDOperand BitI = Elts[i]; 2278 SDOperand BitI1 = Elts[i+1]; 2279 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2280 return false; 2281 if (V2IsSplat) { 2282 if (isUndefOrEqual(BitI1, NumElts)) 2283 return false; 2284 } else { 2285 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2286 return false; 2287 } 2288 } 2289 2290 return true; 2291} 2292 2293bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2294 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2295 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2296} 2297 2298/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2299/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2300/// <0, 0, 1, 1> 2301bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2302 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2303 2304 unsigned NumElems = N->getNumOperands(); 2305 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2306 return false; 2307 2308 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2309 SDOperand BitI = N->getOperand(i); 2310 SDOperand BitI1 = N->getOperand(i+1); 2311 2312 if (!isUndefOrEqual(BitI, j)) 2313 return false; 2314 if (!isUndefOrEqual(BitI1, j)) 2315 return false; 2316 } 2317 2318 return true; 2319} 2320 2321/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2322/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2323/// <2, 2, 3, 3> 2324bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2325 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2326 2327 unsigned NumElems = N->getNumOperands(); 2328 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2329 return false; 2330 2331 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2332 SDOperand BitI = N->getOperand(i); 2333 SDOperand BitI1 = N->getOperand(i + 1); 2334 2335 if (!isUndefOrEqual(BitI, j)) 2336 return false; 2337 if (!isUndefOrEqual(BitI1, j)) 2338 return false; 2339 } 2340 2341 return true; 2342} 2343 2344/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2345/// specifies a shuffle of elements that is suitable for input to MOVSS, 2346/// MOVSD, and MOVD, i.e. setting the lowest element. 
2347static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) { 2348 if (NumElts != 2 && NumElts != 4) 2349 return false; 2350 2351 if (!isUndefOrEqual(Elts[0], NumElts)) 2352 return false; 2353 2354 for (unsigned i = 1; i < NumElts; ++i) { 2355 if (!isUndefOrEqual(Elts[i], i)) 2356 return false; 2357 } 2358 2359 return true; 2360} 2361 2362bool X86::isMOVLMask(SDNode *N) { 2363 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2364 return ::isMOVLMask(N->op_begin(), N->getNumOperands()); 2365} 2366 2367/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 2368/// of what x86 movss want. X86 movs requires the lowest element to be lowest 2369/// element of vector 2 and the other elements to come from vector 1 in order. 2370static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps, 2371 bool V2IsSplat = false, 2372 bool V2IsUndef = false) { 2373 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 2374 return false; 2375 2376 if (!isUndefOrEqual(Ops[0], 0)) 2377 return false; 2378 2379 for (unsigned i = 1; i < NumOps; ++i) { 2380 SDOperand Arg = Ops[i]; 2381 if (!(isUndefOrEqual(Arg, i+NumOps) || 2382 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || 2383 (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) 2384 return false; 2385 } 2386 2387 return true; 2388} 2389 2390static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 2391 bool V2IsUndef = false) { 2392 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2393 return isCommutedMOVL(N->op_begin(), N->getNumOperands(), 2394 V2IsSplat, V2IsUndef); 2395} 2396 2397/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2398/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 2399bool X86::isMOVSHDUPMask(SDNode *N) { 2400 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2401 2402 if (N->getNumOperands() != 4) 2403 return false; 2404 2405 // Expect 1, 1, 3, 3 2406 for (unsigned i = 0; i < 2; ++i) { 2407 SDOperand Arg = N->getOperand(i); 2408 if (Arg.getOpcode() == ISD::UNDEF) continue; 2409 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2410 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2411 if (Val != 1) return false; 2412 } 2413 2414 bool HasHi = false; 2415 for (unsigned i = 2; i < 4; ++i) { 2416 SDOperand Arg = N->getOperand(i); 2417 if (Arg.getOpcode() == ISD::UNDEF) continue; 2418 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2419 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2420 if (Val != 3) return false; 2421 HasHi = true; 2422 } 2423 2424 // Don't use movshdup if it can be done with a shufps. 2425 return HasHi; 2426} 2427 2428/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2429/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 
2430bool X86::isMOVSLDUPMask(SDNode *N) { 2431 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2432 2433 if (N->getNumOperands() != 4) 2434 return false; 2435 2436 // Expect 0, 0, 2, 2 2437 for (unsigned i = 0; i < 2; ++i) { 2438 SDOperand Arg = N->getOperand(i); 2439 if (Arg.getOpcode() == ISD::UNDEF) continue; 2440 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2441 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2442 if (Val != 0) return false; 2443 } 2444 2445 bool HasHi = false; 2446 for (unsigned i = 2; i < 4; ++i) { 2447 SDOperand Arg = N->getOperand(i); 2448 if (Arg.getOpcode() == ISD::UNDEF) continue; 2449 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2450 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2451 if (Val != 2) return false; 2452 HasHi = true; 2453 } 2454 2455 // Don't use movshdup if it can be done with a shufps. 2456 return HasHi; 2457} 2458 2459/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand 2460/// specifies a identity operation on the LHS or RHS. 2461static bool isIdentityMask(SDNode *N, bool RHS = false) { 2462 unsigned NumElems = N->getNumOperands(); 2463 for (unsigned i = 0; i < NumElems; ++i) 2464 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0))) 2465 return false; 2466 return true; 2467} 2468 2469/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2470/// a splat of a single element. 2471static bool isSplatMask(SDNode *N) { 2472 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2473 2474 // This is a splat operation if each element of the permute is the same, and 2475 // if the value doesn't reference the second vector. 2476 unsigned NumElems = N->getNumOperands(); 2477 SDOperand ElementBase; 2478 unsigned i = 0; 2479 for (; i != NumElems; ++i) { 2480 SDOperand Elt = N->getOperand(i); 2481 if (isa<ConstantSDNode>(Elt)) { 2482 ElementBase = Elt; 2483 break; 2484 } 2485 } 2486 2487 if (!ElementBase.Val) 2488 return false; 2489 2490 for (; i != NumElems; ++i) { 2491 SDOperand Arg = N->getOperand(i); 2492 if (Arg.getOpcode() == ISD::UNDEF) continue; 2493 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2494 if (Arg != ElementBase) return false; 2495 } 2496 2497 // Make sure it is a splat of the first vector operand. 2498 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 2499} 2500 2501/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2502/// a splat of a single element and it's a 2 or 4 element mask. 2503bool X86::isSplatMask(SDNode *N) { 2504 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2505 2506 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 2507 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 2508 return false; 2509 return ::isSplatMask(N); 2510} 2511 2512/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 2513/// specifies a splat of zero element. 2514bool X86::isSplatLoMask(SDNode *N) { 2515 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2516 2517 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2518 if (!isUndefOrEqual(N->getOperand(i), 0)) 2519 return false; 2520 return true; 2521} 2522 2523/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2524/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2525/// instructions. 2526unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2527 unsigned NumOperands = N->getNumOperands(); 2528 unsigned Shift = (NumOperands == 4) ? 
2 : 1; 2529 unsigned Mask = 0; 2530 for (unsigned i = 0; i < NumOperands; ++i) { 2531 unsigned Val = 0; 2532 SDOperand Arg = N->getOperand(NumOperands-i-1); 2533 if (Arg.getOpcode() != ISD::UNDEF) 2534 Val = cast<ConstantSDNode>(Arg)->getValue(); 2535 if (Val >= NumOperands) Val -= NumOperands; 2536 Mask |= Val; 2537 if (i != NumOperands - 1) 2538 Mask <<= Shift; 2539 } 2540 2541 return Mask; 2542} 2543 2544/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2545/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2546/// instructions. 2547unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2548 unsigned Mask = 0; 2549 // 8 nodes, but we only care about the last 4. 2550 for (unsigned i = 7; i >= 4; --i) { 2551 unsigned Val = 0; 2552 SDOperand Arg = N->getOperand(i); 2553 if (Arg.getOpcode() != ISD::UNDEF) 2554 Val = cast<ConstantSDNode>(Arg)->getValue(); 2555 Mask |= (Val - 4); 2556 if (i != 4) 2557 Mask <<= 2; 2558 } 2559 2560 return Mask; 2561} 2562 2563/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2564/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2565/// instructions. 2566unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2567 unsigned Mask = 0; 2568 // 8 nodes, but we only care about the first 4. 2569 for (int i = 3; i >= 0; --i) { 2570 unsigned Val = 0; 2571 SDOperand Arg = N->getOperand(i); 2572 if (Arg.getOpcode() != ISD::UNDEF) 2573 Val = cast<ConstantSDNode>(Arg)->getValue(); 2574 Mask |= Val; 2575 if (i != 0) 2576 Mask <<= 2; 2577 } 2578 2579 return Mask; 2580} 2581 2582/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2583/// specifies a 8 element shuffle that can be broken into a pair of 2584/// PSHUFHW and PSHUFLW. 2585static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2586 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2587 2588 if (N->getNumOperands() != 8) 2589 return false; 2590 2591 // Lower quadword shuffled. 2592 for (unsigned i = 0; i != 4; ++i) { 2593 SDOperand Arg = N->getOperand(i); 2594 if (Arg.getOpcode() == ISD::UNDEF) continue; 2595 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2596 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2597 if (Val >= 4) 2598 return false; 2599 } 2600 2601 // Upper quadword shuffled. 2602 for (unsigned i = 4; i != 8; ++i) { 2603 SDOperand Arg = N->getOperand(i); 2604 if (Arg.getOpcode() == ISD::UNDEF) continue; 2605 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2606 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2607 if (Val < 4 || Val > 7) 2608 return false; 2609 } 2610 2611 return true; 2612} 2613 2614/// CommuteVectorShuffle - Swap vector_shuffle operands as well as 2615/// values in ther permute mask. 
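/// For example, shuffling V1, V2 with mask <0, 1, 4, 5> becomes shuffling
/// V2, V1 with mask <4, 5, 0, 1>: indices that referred to the first operand
/// get NumElems added, and indices that referred to the second get NumElems
/// subtracted.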
2616static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2617 SDOperand &V2, SDOperand &Mask, 2618 SelectionDAG &DAG) { 2619 MVT::ValueType VT = Op.getValueType(); 2620 MVT::ValueType MaskVT = Mask.getValueType(); 2621 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2622 unsigned NumElems = Mask.getNumOperands(); 2623 SmallVector<SDOperand, 8> MaskVec; 2624 2625 for (unsigned i = 0; i != NumElems; ++i) { 2626 SDOperand Arg = Mask.getOperand(i); 2627 if (Arg.getOpcode() == ISD::UNDEF) { 2628 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2629 continue; 2630 } 2631 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2632 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2633 if (Val < NumElems) 2634 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2635 else 2636 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2637 } 2638 2639 std::swap(V1, V2); 2640 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2641 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2642} 2643 2644/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2645/// the two vector operands have swapped position. 2646static 2647SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2648 MVT::ValueType MaskVT = Mask.getValueType(); 2649 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2650 unsigned NumElems = Mask.getNumOperands(); 2651 SmallVector<SDOperand, 8> MaskVec; 2652 for (unsigned i = 0; i != NumElems; ++i) { 2653 SDOperand Arg = Mask.getOperand(i); 2654 if (Arg.getOpcode() == ISD::UNDEF) { 2655 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2656 continue; 2657 } 2658 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2659 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2660 if (Val < NumElems) 2661 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2662 else 2663 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2664 } 2665 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2666} 2667 2668 2669/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2670/// match movhlps. The lower half elements should come from upper half of 2671/// V1 (and in order), and the upper half elements should come from the upper 2672/// half of V2 (and in order). 2673static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2674 unsigned NumElems = Mask->getNumOperands(); 2675 if (NumElems != 4) 2676 return false; 2677 for (unsigned i = 0, e = 2; i != e; ++i) 2678 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2679 return false; 2680 for (unsigned i = 2; i != 4; ++i) 2681 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2682 return false; 2683 return true; 2684} 2685 2686/// isScalarLoadToVector - Returns true if the node is a scalar load that 2687/// is promoted to a vector. 2688static inline bool isScalarLoadToVector(SDNode *N) { 2689 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2690 N = N->getOperand(0).Val; 2691 return ISD::isNON_EXTLoad(N); 2692 } 2693 return false; 2694} 2695 2696/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2697/// match movlp{s|d}. The lower half elements should come from lower half of 2698/// V1 (and in order), and the upper half elements should come from the upper 2699/// half of V2 (and in order). And since V1 will become the source of the 2700/// MOVLP, it must be either a vector load or a scalar load to vector. 
2701static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 2702 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 2703 return false; 2704 // Is V2 is a vector load, don't do this transformation. We will try to use 2705 // load folding shufps op. 2706 if (ISD::isNON_EXTLoad(V2)) 2707 return false; 2708 2709 unsigned NumElems = Mask->getNumOperands(); 2710 if (NumElems != 2 && NumElems != 4) 2711 return false; 2712 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2713 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2714 return false; 2715 for (unsigned i = NumElems/2; i != NumElems; ++i) 2716 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2717 return false; 2718 return true; 2719} 2720 2721/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2722/// all the same. 2723static bool isSplatVector(SDNode *N) { 2724 if (N->getOpcode() != ISD::BUILD_VECTOR) 2725 return false; 2726 2727 SDOperand SplatValue = N->getOperand(0); 2728 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2729 if (N->getOperand(i) != SplatValue) 2730 return false; 2731 return true; 2732} 2733 2734/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2735/// to an undef. 2736static bool isUndefShuffle(SDNode *N) { 2737 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2738 return false; 2739 2740 SDOperand V1 = N->getOperand(0); 2741 SDOperand V2 = N->getOperand(1); 2742 SDOperand Mask = N->getOperand(2); 2743 unsigned NumElems = Mask.getNumOperands(); 2744 for (unsigned i = 0; i != NumElems; ++i) { 2745 SDOperand Arg = Mask.getOperand(i); 2746 if (Arg.getOpcode() != ISD::UNDEF) { 2747 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2748 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2749 return false; 2750 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2751 return false; 2752 } 2753 } 2754 return true; 2755} 2756 2757/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2758/// constant +0.0. 2759static inline bool isZeroNode(SDOperand Elt) { 2760 return ((isa<ConstantSDNode>(Elt) && 2761 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2762 (isa<ConstantFPSDNode>(Elt) && 2763 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 2764} 2765 2766/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2767/// to an zero vector. 2768static bool isZeroShuffle(SDNode *N) { 2769 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2770 return false; 2771 2772 SDOperand V1 = N->getOperand(0); 2773 SDOperand V2 = N->getOperand(1); 2774 SDOperand Mask = N->getOperand(2); 2775 unsigned NumElems = Mask.getNumOperands(); 2776 for (unsigned i = 0; i != NumElems; ++i) { 2777 SDOperand Arg = Mask.getOperand(i); 2778 if (Arg.getOpcode() == ISD::UNDEF) 2779 continue; 2780 2781 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2782 if (Idx < NumElems) { 2783 unsigned Opc = V1.Val->getOpcode(); 2784 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val)) 2785 continue; 2786 if (Opc != ISD::BUILD_VECTOR || 2787 !isZeroNode(V1.Val->getOperand(Idx))) 2788 return false; 2789 } else if (Idx >= NumElems) { 2790 unsigned Opc = V2.Val->getOpcode(); 2791 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val)) 2792 continue; 2793 if (Opc != ISD::BUILD_VECTOR || 2794 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2795 return false; 2796 } 2797 } 2798 return true; 2799} 2800 2801/// getZeroVector - Returns a vector of specified type with all zero elements. 
2802/// 2803static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2804 assert(MVT::isVector(VT) && "Expected a vector type"); 2805 2806 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2807 // type. This ensures they get CSE'd. 2808 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2809 SDOperand Vec; 2810 if (MVT::getSizeInBits(VT) == 64) // MMX 2811 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2812 else // SSE 2813 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2814 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2815} 2816 2817/// getOnesVector - Returns a vector of specified type with all bits set. 2818/// 2819static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2820 assert(MVT::isVector(VT) && "Expected a vector type"); 2821 2822 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2823 // type. This ensures they get CSE'd. 2824 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2825 SDOperand Vec; 2826 if (MVT::getSizeInBits(VT) == 64) // MMX 2827 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2828 else // SSE 2829 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2830 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2831} 2832 2833 2834/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2835/// that point to V2 points to its first element. 2836static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2837 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2838 2839 bool Changed = false; 2840 SmallVector<SDOperand, 8> MaskVec; 2841 unsigned NumElems = Mask.getNumOperands(); 2842 for (unsigned i = 0; i != NumElems; ++i) { 2843 SDOperand Arg = Mask.getOperand(i); 2844 if (Arg.getOpcode() != ISD::UNDEF) { 2845 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2846 if (Val > NumElems) { 2847 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2848 Changed = true; 2849 } 2850 } 2851 MaskVec.push_back(Arg); 2852 } 2853 2854 if (Changed) 2855 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2856 &MaskVec[0], MaskVec.size()); 2857 return Mask; 2858} 2859 2860/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2861/// operation of specified width. 2862static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2863 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2864 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2865 2866 SmallVector<SDOperand, 8> MaskVec; 2867 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2868 for (unsigned i = 1; i != NumElems; ++i) 2869 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2870 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2871} 2872 2873/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2874/// of specified width. 
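/// For example, getUnpacklMask(4, DAG) builds the constant mask <0, 4, 1, 5>.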
2875static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2876 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2877 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2878 SmallVector<SDOperand, 8> MaskVec; 2879 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2880 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2881 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2882 } 2883 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2884} 2885 2886/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2887/// of specified width. 2888static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2889 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2890 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2891 unsigned Half = NumElems/2; 2892 SmallVector<SDOperand, 8> MaskVec; 2893 for (unsigned i = 0; i != Half; ++i) { 2894 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2895 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2896 } 2897 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2898} 2899 2900/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps 2901/// element #0 of a vector with the specified index, leaving the rest of the 2902/// elements in place. 2903static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, 2904 SelectionDAG &DAG) { 2905 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2906 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2907 SmallVector<SDOperand, 8> MaskVec; 2908 // Element #0 of the result gets the elt we are replacing. 2909 MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); 2910 for (unsigned i = 1; i != NumElems; ++i) 2911 MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); 2912 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2913} 2914 2915/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 2916/// 2917static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2918 SDOperand V1 = Op.getOperand(0); 2919 SDOperand Mask = Op.getOperand(2); 2920 MVT::ValueType VT = Op.getValueType(); 2921 unsigned NumElems = Mask.getNumOperands(); 2922 Mask = getUnpacklMask(NumElems, DAG); 2923 while (NumElems != 4) { 2924 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2925 NumElems >>= 1; 2926 } 2927 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2928 2929 Mask = getZeroVector(MVT::v4i32, DAG); 2930 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2931 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2932 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2933} 2934 2935/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2936/// vector of zero or undef vector. This produces a shuffle where the low 2937/// element of V2 is swizzled into the zero/undef vector, landing at element 2938/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 2939static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, 2940 bool isZero, SelectionDAG &DAG) { 2941 MVT::ValueType VT = V2.getValueType(); 2942 SDOperand V1 = isZero ? 
getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2943 unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); 2944 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2945 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2946 SmallVector<SDOperand, 16> MaskVec; 2947 for (unsigned i = 0; i != NumElems; ++i) 2948 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 2949 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2950 else 2951 MaskVec.push_back(DAG.getConstant(i, EVT)); 2952 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2953 &MaskVec[0], MaskVec.size()); 2954 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2955} 2956 2957/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2958/// 2959static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2960 unsigned NumNonZero, unsigned NumZero, 2961 SelectionDAG &DAG, TargetLowering &TLI) { 2962 if (NumNonZero > 8) 2963 return SDOperand(); 2964 2965 SDOperand V(0, 0); 2966 bool First = true; 2967 for (unsigned i = 0; i < 16; ++i) { 2968 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2969 if (ThisIsNonZero && First) { 2970 if (NumZero) 2971 V = getZeroVector(MVT::v8i16, DAG); 2972 else 2973 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2974 First = false; 2975 } 2976 2977 if ((i & 1) != 0) { 2978 SDOperand ThisElt(0, 0), LastElt(0, 0); 2979 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2980 if (LastIsNonZero) { 2981 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2982 } 2983 if (ThisIsNonZero) { 2984 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2985 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2986 ThisElt, DAG.getConstant(8, MVT::i8)); 2987 if (LastIsNonZero) 2988 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2989 } else 2990 ThisElt = LastElt; 2991 2992 if (ThisElt.Val) 2993 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2994 DAG.getIntPtrConstant(i/2)); 2995 } 2996 } 2997 2998 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2999} 3000 3001/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 3002/// 3003static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 3004 unsigned NumNonZero, unsigned NumZero, 3005 SelectionDAG &DAG, TargetLowering &TLI) { 3006 if (NumNonZero > 4) 3007 return SDOperand(); 3008 3009 SDOperand V(0, 0); 3010 bool First = true; 3011 for (unsigned i = 0; i < 8; ++i) { 3012 bool isNonZero = (NonZeros & (1 << i)) != 0; 3013 if (isNonZero) { 3014 if (First) { 3015 if (NumZero) 3016 V = getZeroVector(MVT::v8i16, DAG); 3017 else 3018 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3019 First = false; 3020 } 3021 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 3022 DAG.getIntPtrConstant(i)); 3023 } 3024 } 3025 3026 return V; 3027} 3028 3029SDOperand 3030X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3031 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 3032 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 3033 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 3034 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 3035 // eliminated on x86-32 hosts. 
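    // If the type is already one of the canonical ones, return the node
    // unchanged; otherwise getOnesVector / getZeroVector rebuild the constant
    // in the canonical type and bitcast it back to the requested type.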
3036 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 3037 return Op; 3038 3039 if (ISD::isBuildVectorAllOnes(Op.Val)) 3040 return getOnesVector(Op.getValueType(), DAG); 3041 return getZeroVector(Op.getValueType(), DAG); 3042 } 3043 3044 MVT::ValueType VT = Op.getValueType(); 3045 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3046 unsigned EVTBits = MVT::getSizeInBits(EVT); 3047 3048 unsigned NumElems = Op.getNumOperands(); 3049 unsigned NumZero = 0; 3050 unsigned NumNonZero = 0; 3051 unsigned NonZeros = 0; 3052 bool IsAllConstants = true; 3053 SmallSet<SDOperand, 8> Values; 3054 for (unsigned i = 0; i < NumElems; ++i) { 3055 SDOperand Elt = Op.getOperand(i); 3056 if (Elt.getOpcode() == ISD::UNDEF) 3057 continue; 3058 Values.insert(Elt); 3059 if (Elt.getOpcode() != ISD::Constant && 3060 Elt.getOpcode() != ISD::ConstantFP) 3061 IsAllConstants = false; 3062 if (isZeroNode(Elt)) 3063 NumZero++; 3064 else { 3065 NonZeros |= (1 << i); 3066 NumNonZero++; 3067 } 3068 } 3069 3070 if (NumNonZero == 0) { 3071 // All undef vector. Return an UNDEF. All zero vectors were handled above. 3072 return DAG.getNode(ISD::UNDEF, VT); 3073 } 3074 3075 // Special case for single non-zero, non-undef, element. 3076 if (NumNonZero == 1 && NumElems <= 4) { 3077 unsigned Idx = CountTrailingZeros_32(NonZeros); 3078 SDOperand Item = Op.getOperand(Idx); 3079 3080 // If this is an insertion of an i64 value on x86-32, and if the top bits of 3081 // the value are obviously zero, truncate the value to i32 and do the 3082 // insertion that way. Only do this if the value is non-constant or if the 3083 // value is a constant being inserted into element 0. It is cheaper to do 3084 // a constant pool load than it is to do a movd + shuffle. 3085 if (EVT == MVT::i64 && !Subtarget->is64Bit() && 3086 (!IsAllConstants || Idx == 0)) { 3087 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 3088 // Handle MMX and SSE both. 3089 MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; 3090 MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2; 3091 3092 // Truncate the value (which may itself be a constant) to i32, and 3093 // convert it to a vector with movd (S2V+shuffle to zero extend). 3094 Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); 3095 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); 3096 Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG); 3097 3098 // Now we have our 32-bit value zero extended in the low element of 3099 // a vector. If Idx != 0, swizzle it into place. 3100 if (Idx != 0) { 3101 SDOperand Ops[] = { 3102 Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), 3103 getSwapEltZeroMask(VecElts, Idx, DAG) 3104 }; 3105 Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); 3106 } 3107 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); 3108 } 3109 } 3110 3111 // If we have a constant or non-constant insertion into the low element of 3112 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 3113 // the rest of the elements. This will be matched as movd/movq/movss/movsd 3114 // depending on what the source datatype is. Because we can only get here 3115 // when NumElems <= 4, this only needs to handle i32/f32/i64/f64. 3116 if (Idx == 0 && 3117 // Don't do this for i64 values on x86-32. 3118 (EVT != MVT::i64 || Subtarget->is64Bit())) { 3119 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3120 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 
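      // With Idx == 0 the helper builds a mask like <4, 1, 2, 3>, so only the
      // low element comes from Item and the remaining elements come from the
      // zero (or undef) vector.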
3121 return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3122 } 3123 3124 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 3125 return SDOperand(); 3126 3127 // Otherwise, if this is a vector with i32 or f32 elements, and the element 3128 // is a non-constant being inserted into an element other than the low one, 3129 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 3130 // movd/movss) to move this into the low element, then shuffle it into 3131 // place. 3132 if (EVTBits == 32) { 3133 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3134 3135 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3136 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3137 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3138 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3139 SmallVector<SDOperand, 8> MaskVec; 3140 for (unsigned i = 0; i < NumElems; i++) 3141 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3142 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3143 &MaskVec[0], MaskVec.size()); 3144 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3145 DAG.getNode(ISD::UNDEF, VT), Mask); 3146 } 3147 } 3148 3149 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3150 if (Values.size() == 1) 3151 return SDOperand(); 3152 3153 // A vector full of immediates; various special cases are already 3154 // handled, so this is best done with a single constant-pool load. 3155 if (IsAllConstants) 3156 return SDOperand(); 3157 3158 // Let legalizer expand 2-wide build_vectors. 3159 if (EVTBits == 64) 3160 return SDOperand(); 3161 3162 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3163 if (EVTBits == 8 && NumElems == 16) { 3164 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3165 *this); 3166 if (V.Val) return V; 3167 } 3168 3169 if (EVTBits == 16 && NumElems == 8) { 3170 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3171 *this); 3172 if (V.Val) return V; 3173 } 3174 3175 // If element VT is == 32 bits, turn it into a number of shuffles. 3176 SmallVector<SDOperand, 8> V; 3177 V.resize(NumElems); 3178 if (NumElems == 4 && NumZero > 0) { 3179 for (unsigned i = 0; i < 4; ++i) { 3180 bool isZero = !(NonZeros & (1 << i)); 3181 if (isZero) 3182 V[i] = getZeroVector(VT, DAG); 3183 else 3184 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3185 } 3186 3187 for (unsigned i = 0; i < 2; ++i) { 3188 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3189 default: break; 3190 case 0: 3191 V[i] = V[i*2]; // Must be a zero vector. 3192 break; 3193 case 1: 3194 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3195 getMOVLMask(NumElems, DAG)); 3196 break; 3197 case 2: 3198 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3199 getMOVLMask(NumElems, DAG)); 3200 break; 3201 case 3: 3202 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3203 getUnpacklMask(NumElems, DAG)); 3204 break; 3205 } 3206 } 3207 3208 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3209 // clears the upper bits. 3210 // FIXME: we can do the same for v4f32 case when we know both parts of 3211 // the lower half come from scalar_to_vector (loadf32). We should do 3212 // that in post legalizer dag combiner with target specific hooks. 
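    // (NonZeros & (0x3 << 2)) == 0 means elements 2 and 3 are both zero,
    // i.e. the entire upper half of the result is zero.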
3213 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3214 return V[0]; 3215 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3216 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3217 SmallVector<SDOperand, 8> MaskVec; 3218 bool Reverse = (NonZeros & 0x3) == 2; 3219 for (unsigned i = 0; i < 2; ++i) 3220 if (Reverse) 3221 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3222 else 3223 MaskVec.push_back(DAG.getConstant(i, EVT)); 3224 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3225 for (unsigned i = 0; i < 2; ++i) 3226 if (Reverse) 3227 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3228 else 3229 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3230 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3231 &MaskVec[0], MaskVec.size()); 3232 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3233 } 3234 3235 if (Values.size() > 2) { 3236 // Expand into a number of unpckl*. 3237 // e.g. for v4f32 3238 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3239 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3240 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3241 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3242 for (unsigned i = 0; i < NumElems; ++i) 3243 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3244 NumElems >>= 1; 3245 while (NumElems != 0) { 3246 for (unsigned i = 0; i < NumElems; ++i) 3247 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3248 UnpckMask); 3249 NumElems >>= 1; 3250 } 3251 return V[0]; 3252 } 3253 3254 return SDOperand(); 3255} 3256 3257static 3258SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3259 SDOperand PermMask, SelectionDAG &DAG, 3260 TargetLowering &TLI) { 3261 SDOperand NewV; 3262 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3263 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3264 MVT::ValueType PtrVT = TLI.getPointerTy(); 3265 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3266 PermMask.Val->op_end()); 3267 3268 // First record which half of which vector the low elements come from. 3269 SmallVector<unsigned, 4> LowQuad(4); 3270 for (unsigned i = 0; i < 4; ++i) { 3271 SDOperand Elt = MaskElts[i]; 3272 if (Elt.getOpcode() == ISD::UNDEF) 3273 continue; 3274 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3275 int QuadIdx = EltIdx / 4; 3276 ++LowQuad[QuadIdx]; 3277 } 3278 int BestLowQuad = -1; 3279 unsigned MaxQuad = 1; 3280 for (unsigned i = 0; i < 4; ++i) { 3281 if (LowQuad[i] > MaxQuad) { 3282 BestLowQuad = i; 3283 MaxQuad = LowQuad[i]; 3284 } 3285 } 3286 3287 // Record which half of which vector the high elements come from. 3288 SmallVector<unsigned, 4> HighQuad(4); 3289 for (unsigned i = 4; i < 8; ++i) { 3290 SDOperand Elt = MaskElts[i]; 3291 if (Elt.getOpcode() == ISD::UNDEF) 3292 continue; 3293 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3294 int QuadIdx = EltIdx / 4; 3295 ++HighQuad[QuadIdx]; 3296 } 3297 int BestHighQuad = -1; 3298 MaxQuad = 1; 3299 for (unsigned i = 0; i < 4; ++i) { 3300 if (HighQuad[i] > MaxQuad) { 3301 BestHighQuad = i; 3302 MaxQuad = HighQuad[i]; 3303 } 3304 } 3305 3306 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3307 if (BestLowQuad != -1 || BestHighQuad != -1) { 3308 // First sort the 4 chunks in order using shufpd. 
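    // Viewed as v2i64, V1 supplies quads 0-1 and V2 supplies quads 2-3, so a
    // two-element mask of <BestLowQuad, BestHighQuad> (falling back to 0 and 1
    // when a half has no preferred quad) moves the chosen quads into the low
    // and high halves of NewV.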
3309 SmallVector<SDOperand, 8> MaskVec; 3310 if (BestLowQuad != -1) 3311 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3312 else 3313 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3314 if (BestHighQuad != -1) 3315 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3316 else 3317 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3318 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3319 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3320 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3321 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3322 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3323 3324 // Now sort high and low parts separately. 3325 BitVector InOrder(8); 3326 if (BestLowQuad != -1) { 3327 // Sort lower half in order using PSHUFLW. 3328 MaskVec.clear(); 3329 bool AnyOutOrder = false; 3330 for (unsigned i = 0; i != 4; ++i) { 3331 SDOperand Elt = MaskElts[i]; 3332 if (Elt.getOpcode() == ISD::UNDEF) { 3333 MaskVec.push_back(Elt); 3334 InOrder.set(i); 3335 } else { 3336 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3337 if (EltIdx != i) 3338 AnyOutOrder = true; 3339 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3340 // If this element is in the right place after this shuffle, then 3341 // remember it. 3342 if ((int)(EltIdx / 4) == BestLowQuad) 3343 InOrder.set(i); 3344 } 3345 } 3346 if (AnyOutOrder) { 3347 for (unsigned i = 4; i != 8; ++i) 3348 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3349 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3350 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3351 } 3352 } 3353 3354 if (BestHighQuad != -1) { 3355 // Sort high half in order using PSHUFHW if possible. 3356 MaskVec.clear(); 3357 for (unsigned i = 0; i != 4; ++i) 3358 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3359 bool AnyOutOrder = false; 3360 for (unsigned i = 4; i != 8; ++i) { 3361 SDOperand Elt = MaskElts[i]; 3362 if (Elt.getOpcode() == ISD::UNDEF) { 3363 MaskVec.push_back(Elt); 3364 InOrder.set(i); 3365 } else { 3366 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3367 if (EltIdx != i) 3368 AnyOutOrder = true; 3369 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3370 // If this element is in the right place after this shuffle, then 3371 // remember it. 3372 if ((int)(EltIdx / 4) == BestHighQuad) 3373 InOrder.set(i); 3374 } 3375 } 3376 if (AnyOutOrder) { 3377 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3378 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3379 } 3380 } 3381 3382 // The other elements are put in the right place using pextrw and pinsrw. 3383 for (unsigned i = 0; i != 8; ++i) { 3384 if (InOrder[i]) 3385 continue; 3386 SDOperand Elt = MaskElts[i]; 3387 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3388 if (EltIdx == i) 3389 continue; 3390 SDOperand ExtOp = (EltIdx < 8) 3391 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3392 DAG.getConstant(EltIdx, PtrVT)) 3393 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3394 DAG.getConstant(EltIdx - 8, PtrVT)); 3395 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3396 DAG.getConstant(i, PtrVT)); 3397 } 3398 return NewV; 3399 } 3400 3401 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3402 ///as few as possible. 3403 // First, let's find out how many elements are already in the right order. 
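  // Count how many mask entries are already in place relative to V1 and to V2
  // (V1InOrder / V2InOrder), and how many out-of-place entries come from each
  // source (V1FromV1 / V2FromV2). Undef entries count as in order for both.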
3404 unsigned V1InOrder = 0; 3405 unsigned V1FromV1 = 0; 3406 unsigned V2InOrder = 0; 3407 unsigned V2FromV2 = 0; 3408 SmallVector<SDOperand, 8> V1Elts; 3409 SmallVector<SDOperand, 8> V2Elts; 3410 for (unsigned i = 0; i < 8; ++i) { 3411 SDOperand Elt = MaskElts[i]; 3412 if (Elt.getOpcode() == ISD::UNDEF) { 3413 V1Elts.push_back(Elt); 3414 V2Elts.push_back(Elt); 3415 ++V1InOrder; 3416 ++V2InOrder; 3417 continue; 3418 } 3419 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3420 if (EltIdx == i) { 3421 V1Elts.push_back(Elt); 3422 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3423 ++V1InOrder; 3424 } else if (EltIdx == i+8) { 3425 V1Elts.push_back(Elt); 3426 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3427 ++V2InOrder; 3428 } else if (EltIdx < 8) { 3429 V1Elts.push_back(Elt); 3430 ++V1FromV1; 3431 } else { 3432 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3433 ++V2FromV2; 3434 } 3435 } 3436 3437 if (V2InOrder > V1InOrder) { 3438 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3439 std::swap(V1, V2); 3440 std::swap(V1Elts, V2Elts); 3441 std::swap(V1FromV1, V2FromV2); 3442 } 3443 3444 if ((V1FromV1 + V1InOrder) != 8) { 3445 // Some elements are from V2. 3446 if (V1FromV1) { 3447 // If there are elements that are from V1 but out of place, 3448 // then first sort them in place 3449 SmallVector<SDOperand, 8> MaskVec; 3450 for (unsigned i = 0; i < 8; ++i) { 3451 SDOperand Elt = V1Elts[i]; 3452 if (Elt.getOpcode() == ISD::UNDEF) { 3453 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3454 continue; 3455 } 3456 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3457 if (EltIdx >= 8) 3458 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3459 else 3460 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3461 } 3462 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3463 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3464 } 3465 3466 NewV = V1; 3467 for (unsigned i = 0; i < 8; ++i) { 3468 SDOperand Elt = V1Elts[i]; 3469 if (Elt.getOpcode() == ISD::UNDEF) 3470 continue; 3471 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3472 if (EltIdx < 8) 3473 continue; 3474 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3475 DAG.getConstant(EltIdx - 8, PtrVT)); 3476 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3477 DAG.getConstant(i, PtrVT)); 3478 } 3479 return NewV; 3480 } else { 3481 // All elements are from V1. 3482 NewV = V1; 3483 for (unsigned i = 0; i < 8; ++i) { 3484 SDOperand Elt = V1Elts[i]; 3485 if (Elt.getOpcode() == ISD::UNDEF) 3486 continue; 3487 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3488 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3489 DAG.getConstant(EltIdx, PtrVT)); 3490 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3491 DAG.getConstant(i, PtrVT)); 3492 } 3493 return NewV; 3494 } 3495} 3496 3497/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3498/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be 3499/// done when every pair / quad of shuffle mask elements point to elements in 3500/// the right sequence. e.g. 3501/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3502static 3503SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3504 MVT::ValueType VT, 3505 SDOperand PermMask, SelectionDAG &DAG, 3506 TargetLowering &TLI) { 3507 unsigned NumElems = PermMask.getNumOperands(); 3508 unsigned NewWidth = (NumElems == 4) ? 
2 : 4; 3509 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3510 MVT::ValueType NewVT = MaskVT; 3511 switch (VT) { 3512 case MVT::v4f32: NewVT = MVT::v2f64; break; 3513 case MVT::v4i32: NewVT = MVT::v2i64; break; 3514 case MVT::v8i16: NewVT = MVT::v4i32; break; 3515 case MVT::v16i8: NewVT = MVT::v4i32; break; 3516 default: assert(false && "Unexpected!"); 3517 } 3518 3519 if (NewWidth == 2) { 3520 if (MVT::isInteger(VT)) 3521 NewVT = MVT::v2i64; 3522 else 3523 NewVT = MVT::v2f64; 3524 } 3525 unsigned Scale = NumElems / NewWidth; 3526 SmallVector<SDOperand, 8> MaskVec; 3527 for (unsigned i = 0; i < NumElems; i += Scale) { 3528 unsigned StartIdx = ~0U; 3529 for (unsigned j = 0; j < Scale; ++j) { 3530 SDOperand Elt = PermMask.getOperand(i+j); 3531 if (Elt.getOpcode() == ISD::UNDEF) 3532 continue; 3533 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3534 if (StartIdx == ~0U) 3535 StartIdx = EltIdx - (EltIdx % Scale); 3536 if (EltIdx != StartIdx + j) 3537 return SDOperand(); 3538 } 3539 if (StartIdx == ~0U) 3540 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3541 else 3542 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3543 } 3544 3545 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3546 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3547 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3548 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3549 &MaskVec[0], MaskVec.size())); 3550} 3551 3552SDOperand 3553X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3554 SDOperand V1 = Op.getOperand(0); 3555 SDOperand V2 = Op.getOperand(1); 3556 SDOperand PermMask = Op.getOperand(2); 3557 MVT::ValueType VT = Op.getValueType(); 3558 unsigned NumElems = PermMask.getNumOperands(); 3559 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3560 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3561 bool V1IsSplat = false; 3562 bool V2IsSplat = false; 3563 3564 if (isUndefShuffle(Op.Val)) 3565 return DAG.getNode(ISD::UNDEF, VT); 3566 3567 if (isZeroShuffle(Op.Val)) 3568 return getZeroVector(VT, DAG); 3569 3570 if (isIdentityMask(PermMask.Val)) 3571 return V1; 3572 else if (isIdentityMask(PermMask.Val, true)) 3573 return V2; 3574 3575 if (isSplatMask(PermMask.Val)) { 3576 if (NumElems <= 4) return Op; 3577 // Promote it to a v4i32 splat. 3578 return PromoteSplat(Op, DAG); 3579 } 3580 3581 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3582 // do it! 3583 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3584 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3585 if (NewOp.Val) 3586 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3587 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3588 // FIXME: Figure out a cleaner way to do this. 3589 // Try to make use of movq to zero out the top part. 
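    // The end result should keep the low 64 bits of the non-zero operand and
    // clear the upper 64 bits, which is the zero-extending form of movq.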
3590 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3591 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3592 if (NewOp.Val) { 3593 SDOperand NewV1 = NewOp.getOperand(0); 3594 SDOperand NewV2 = NewOp.getOperand(1); 3595 SDOperand NewMask = NewOp.getOperand(2); 3596 if (isCommutedMOVL(NewMask.Val, true, false)) { 3597 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3598 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3599 NewV1, NewV2, getMOVLMask(2, DAG)); 3600 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3601 } 3602 } 3603 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3604 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3605 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3606 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3607 } 3608 } 3609 3610 if (X86::isMOVLMask(PermMask.Val)) 3611 return (V1IsUndef) ? V2 : Op; 3612 3613 if (X86::isMOVSHDUPMask(PermMask.Val) || 3614 X86::isMOVSLDUPMask(PermMask.Val) || 3615 X86::isMOVHLPSMask(PermMask.Val) || 3616 X86::isMOVHPMask(PermMask.Val) || 3617 X86::isMOVLPMask(PermMask.Val)) 3618 return Op; 3619 3620 if (ShouldXformToMOVHLPS(PermMask.Val) || 3621 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3622 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3623 3624 bool Commuted = false; 3625 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3626 // 1,1,1,1 -> v8i16 though. 3627 V1IsSplat = isSplatVector(V1.Val); 3628 V2IsSplat = isSplatVector(V2.Val); 3629 3630 // Canonicalize the splat or undef, if present, to be on the RHS. 3631 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3632 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3633 std::swap(V1IsSplat, V2IsSplat); 3634 std::swap(V1IsUndef, V2IsUndef); 3635 Commuted = true; 3636 } 3637 3638 // FIXME: Figure out a cleaner way to do this. 3639 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3640 if (V2IsUndef) return V1; 3641 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3642 if (V2IsSplat) { 3643 // V2 is a splat, so the mask may be malformed. That is, it may point 3644 // to any V2 element. The instruction selectior won't like this. Get 3645 // a corrected mask and commute to form a proper MOVS{S|D}. 3646 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3647 if (NewMask.Val != PermMask.Val) 3648 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3649 } 3650 return Op; 3651 } 3652 3653 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3654 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3655 X86::isUNPCKLMask(PermMask.Val) || 3656 X86::isUNPCKHMask(PermMask.Val)) 3657 return Op; 3658 3659 if (V2IsSplat) { 3660 // Normalize mask so all entries that point to V2 points to its first 3661 // element then try to match unpck{h|l} again. If match, return a 3662 // new vector_shuffle with the corrected mask. 
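    // Because V2 is a splat, all of its elements are identical, so redirecting
    // any index into V2 to V2's first element does not change the shuffle.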
3663 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3664 if (NewMask.Val != PermMask.Val) { 3665 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3666 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3667 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3668 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3669 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3670 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3671 } 3672 } 3673 } 3674 3675 // Normalize the node to match x86 shuffle ops if needed 3676 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3677 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3678 3679 if (Commuted) { 3680 // Commute is back and try unpck* again. 3681 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3682 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3683 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3684 X86::isUNPCKLMask(PermMask.Val) || 3685 X86::isUNPCKHMask(PermMask.Val)) 3686 return Op; 3687 } 3688 3689 // If VT is integer, try PSHUF* first, then SHUFP*. 3690 if (MVT::isInteger(VT)) { 3691 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3692 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3693 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3694 X86::isPSHUFDMask(PermMask.Val)) || 3695 X86::isPSHUFHWMask(PermMask.Val) || 3696 X86::isPSHUFLWMask(PermMask.Val)) { 3697 if (V2.getOpcode() != ISD::UNDEF) 3698 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3699 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3700 return Op; 3701 } 3702 3703 if (X86::isSHUFPMask(PermMask.Val) && 3704 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3705 return Op; 3706 } else { 3707 // Floating point cases in the other order. 3708 if (X86::isSHUFPMask(PermMask.Val)) 3709 return Op; 3710 if (X86::isPSHUFDMask(PermMask.Val) || 3711 X86::isPSHUFHWMask(PermMask.Val) || 3712 X86::isPSHUFLWMask(PermMask.Val)) { 3713 if (V2.getOpcode() != ISD::UNDEF) 3714 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3715 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3716 return Op; 3717 } 3718 } 3719 3720 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 3721 if (VT == MVT::v8i16) { 3722 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3723 if (NewOp.Val) 3724 return NewOp; 3725 } 3726 3727 // Handle all 4 wide cases with a number of shuffles. 3728 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3729 // Don't do this for MMX. 3730 MVT::ValueType MaskVT = PermMask.getValueType(); 3731 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3732 SmallVector<std::pair<int, int>, 8> Locs; 3733 Locs.reserve(NumElems); 3734 SmallVector<SDOperand, 8> Mask1(NumElems, 3735 DAG.getNode(ISD::UNDEF, MaskEVT)); 3736 SmallVector<SDOperand, 8> Mask2(NumElems, 3737 DAG.getNode(ISD::UNDEF, MaskEVT)); 3738 unsigned NumHi = 0; 3739 unsigned NumLo = 0; 3740 // If no more than two elements come from either vector. This can be 3741 // implemented with two shuffles. First shuffle gather the elements. 3742 // The second shuffle, which takes the first shuffle as both of its 3743 // vector operands, put the elements into the right order. 
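    // Mask1 gathers the V1 elements into the low two slots and the V2 elements
    // into the high two slots; Locs records where each result element lands so
    // that the second mask can route it to its final position.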
3744 for (unsigned i = 0; i != NumElems; ++i) { 3745 SDOperand Elt = PermMask.getOperand(i); 3746 if (Elt.getOpcode() == ISD::UNDEF) { 3747 Locs[i] = std::make_pair(-1, -1); 3748 } else { 3749 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3750 if (Val < NumElems) { 3751 Locs[i] = std::make_pair(0, NumLo); 3752 Mask1[NumLo] = Elt; 3753 NumLo++; 3754 } else { 3755 Locs[i] = std::make_pair(1, NumHi); 3756 if (2+NumHi < NumElems) 3757 Mask1[2+NumHi] = Elt; 3758 NumHi++; 3759 } 3760 } 3761 } 3762 if (NumLo <= 2 && NumHi <= 2) { 3763 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3764 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3765 &Mask1[0], Mask1.size())); 3766 for (unsigned i = 0; i != NumElems; ++i) { 3767 if (Locs[i].first == -1) 3768 continue; 3769 else { 3770 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3771 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3772 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3773 } 3774 } 3775 3776 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3777 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3778 &Mask2[0], Mask2.size())); 3779 } 3780 3781 // Break it into (shuffle shuffle_hi, shuffle_lo). 3782 Locs.clear(); 3783 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3784 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3785 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3786 unsigned MaskIdx = 0; 3787 unsigned LoIdx = 0; 3788 unsigned HiIdx = NumElems/2; 3789 for (unsigned i = 0; i != NumElems; ++i) { 3790 if (i == NumElems/2) { 3791 MaskPtr = &HiMask; 3792 MaskIdx = 1; 3793 LoIdx = 0; 3794 HiIdx = NumElems/2; 3795 } 3796 SDOperand Elt = PermMask.getOperand(i); 3797 if (Elt.getOpcode() == ISD::UNDEF) { 3798 Locs[i] = std::make_pair(-1, -1); 3799 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3800 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3801 (*MaskPtr)[LoIdx] = Elt; 3802 LoIdx++; 3803 } else { 3804 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3805 (*MaskPtr)[HiIdx] = Elt; 3806 HiIdx++; 3807 } 3808 } 3809 3810 SDOperand LoShuffle = 3811 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3812 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3813 &LoMask[0], LoMask.size())); 3814 SDOperand HiShuffle = 3815 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3816 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3817 &HiMask[0], HiMask.size())); 3818 SmallVector<SDOperand, 8> MaskOps; 3819 for (unsigned i = 0; i != NumElems; ++i) { 3820 if (Locs[i].first == -1) { 3821 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3822 } else { 3823 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3824 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3825 } 3826 } 3827 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3828 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3829 &MaskOps[0], MaskOps.size())); 3830 } 3831 3832 return SDOperand(); 3833} 3834 3835SDOperand 3836X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3837 SelectionDAG &DAG) { 3838 MVT::ValueType VT = Op.getValueType(); 3839 if (MVT::getSizeInBits(VT) == 8) { 3840 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3841 Op.getOperand(0), Op.getOperand(1)); 3842 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3843 DAG.getValueType(VT)); 3844 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3845 } else if (MVT::getSizeInBits(VT) == 16) { 3846 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3847 Op.getOperand(0), Op.getOperand(1)); 3848 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 
3849 DAG.getValueType(VT)); 3850 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3851 } 3852 return SDOperand(); 3853} 3854 3855 3856SDOperand 3857X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3858 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3859 return SDOperand(); 3860 3861 if (Subtarget->hasSSE41()) 3862 return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 3863 3864 MVT::ValueType VT = Op.getValueType(); 3865 // TODO: handle v16i8. 3866 if (MVT::getSizeInBits(VT) == 16) { 3867 SDOperand Vec = Op.getOperand(0); 3868 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3869 if (Idx == 0) 3870 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3871 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3872 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3873 Op.getOperand(1))); 3874 // Transform it so it match pextrw which produces a 32-bit result. 3875 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3876 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3877 Op.getOperand(0), Op.getOperand(1)); 3878 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3879 DAG.getValueType(VT)); 3880 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3881 } else if (MVT::getSizeInBits(VT) == 32) { 3882 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3883 if (Idx == 0) 3884 return Op; 3885 // SHUFPS the element to the lowest double word, then movss. 3886 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3887 SmallVector<SDOperand, 8> IdxVec; 3888 IdxVec. 3889 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3890 IdxVec. 3891 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3892 IdxVec. 3893 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3894 IdxVec. 3895 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3896 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3897 &IdxVec[0], IdxVec.size()); 3898 SDOperand Vec = Op.getOperand(0); 3899 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3900 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3901 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3902 DAG.getIntPtrConstant(0)); 3903 } else if (MVT::getSizeInBits(VT) == 64) { 3904 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 3905 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 3906 // to match extract_elt for f64. 3907 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3908 if (Idx == 0) 3909 return Op; 3910 3911 // UNPCKHPD the element to the lowest double word, then movsd. 3912 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3913 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3914 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3915 SmallVector<SDOperand, 8> IdxVec; 3916 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3917 IdxVec. 
3918 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3919 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3920 &IdxVec[0], IdxVec.size()); 3921 SDOperand Vec = Op.getOperand(0); 3922 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3923 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3924 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3925 DAG.getIntPtrConstant(0)); 3926 } 3927 3928 return SDOperand(); 3929} 3930 3931SDOperand 3932X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ 3933 MVT::ValueType VT = Op.getValueType(); 3934 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3935 3936 SDOperand N0 = Op.getOperand(0); 3937 SDOperand N1 = Op.getOperand(1); 3938 SDOperand N2 = Op.getOperand(2); 3939 3940 if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) { 3941 unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB 3942 : X86ISD::PINSRW; 3943 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 3944 // argument. 3945 if (N1.getValueType() != MVT::i32) 3946 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3947 if (N2.getValueType() != MVT::i32) 3948 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3949 return DAG.getNode(Opc, VT, N0, N1, N2); 3950 } else if (EVT == MVT::f32) { 3951 // Bits [7:6] of the constant are the source select. This will always be 3952 // zero here. The DAG Combiner may combine an extract_elt index into these 3953 // bits. For example (insert (extract, 3), 2) could be matched by putting 3954 // the '3' into bits [7:6] of X86ISD::INSERTPS. 3955 // Bits [5:4] of the constant are the destination select. This is the 3956 // value of the incoming immediate. 3957 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 3958 // combine either bitwise AND or insert of float 0.0 to set these bits. 3959 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4); 3960 return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2); 3961 } 3962 return SDOperand(); 3963} 3964 3965SDOperand 3966X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3967 MVT::ValueType VT = Op.getValueType(); 3968 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3969 3970 if (Subtarget->hasSSE41()) 3971 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 3972 3973 if (EVT == MVT::i8) 3974 return SDOperand(); 3975 3976 SDOperand N0 = Op.getOperand(0); 3977 SDOperand N1 = Op.getOperand(1); 3978 SDOperand N2 = Op.getOperand(2); 3979 3980 if (MVT::getSizeInBits(EVT) == 16) { 3981 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 3982 // as its second argument. 
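    // Note: pinsrw only reads the low 16 bits of its GR32 source, so an
    // any-extend of the value is sufficient.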
3983 if (N1.getValueType() != MVT::i32) 3984 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3985 if (N2.getValueType() != MVT::i32) 3986 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3987 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3988 } 3989 return SDOperand(); 3990} 3991 3992SDOperand 3993X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3994 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3995 MVT::ValueType VT = MVT::v2i32; 3996 switch (Op.getValueType()) { 3997 default: break; 3998 case MVT::v16i8: 3999 case MVT::v8i16: 4000 VT = MVT::v4i32; 4001 break; 4002 } 4003 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), 4004 DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt)); 4005} 4006 4007// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 4008// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 4009// one of the above mentioned nodes. It has to be wrapped because otherwise 4010// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 4011// be used to form addressing mode. These wrapped nodes will be selected 4012// into MOV32ri. 4013SDOperand 4014X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 4015 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 4016 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 4017 getPointerTy(), 4018 CP->getAlignment()); 4019 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4020 // With PIC, the address is actually $g + Offset. 4021 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4022 !Subtarget->isPICStyleRIPRel()) { 4023 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4024 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4025 Result); 4026 } 4027 4028 return Result; 4029} 4030 4031SDOperand 4032X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 4033 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 4034 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 4035 // If it's a debug information descriptor, don't mess with it. 4036 if (DAG.isVerifiedDebugInfoDesc(Op)) 4037 return Result; 4038 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4039 // With PIC, the address is actually $g + Offset. 4040 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4041 !Subtarget->isPICStyleRIPRel()) { 4042 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4043 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4044 Result); 4045 } 4046 4047 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 4048 // load the value at address GV, not the value of GV itself. This means that 4049 // the GlobalAddress must be in the base or index register of the address, not 4050 // the GV offset field. 
Platform check is inside GVRequiresExtraLoad() call 4051 // The same applies for external symbols during PIC codegen 4052 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 4053 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 4054 PseudoSourceValue::getGOT(), 0); 4055 4056 return Result; 4057} 4058 4059// Lower ISD::GlobalTLSAddress using the "general dynamic" model 4060static SDOperand 4061LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4062 const MVT::ValueType PtrVT) { 4063 SDOperand InFlag; 4064 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 4065 DAG.getNode(X86ISD::GlobalBaseReg, 4066 PtrVT), InFlag); 4067 InFlag = Chain.getValue(1); 4068 4069 // emit leal symbol@TLSGD(,%ebx,1), %eax 4070 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4071 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4072 GA->getValueType(0), 4073 GA->getOffset()); 4074 SDOperand Ops[] = { Chain, TGA, InFlag }; 4075 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 4076 InFlag = Result.getValue(2); 4077 Chain = Result.getValue(1); 4078 4079 // call ___tls_get_addr. This function receives its argument in 4080 // the register EAX. 4081 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 4082 InFlag = Chain.getValue(1); 4083 4084 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4085 SDOperand Ops1[] = { Chain, 4086 DAG.getTargetExternalSymbol("___tls_get_addr", 4087 PtrVT), 4088 DAG.getRegister(X86::EAX, PtrVT), 4089 DAG.getRegister(X86::EBX, PtrVT), 4090 InFlag }; 4091 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 4092 InFlag = Chain.getValue(1); 4093 4094 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 4095} 4096 4097// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 4098// "local exec" model. 4099static SDOperand 4100LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4101 const MVT::ValueType PtrVT) { 4102 // Get the Thread Pointer 4103 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4104 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4105 // exec) 4106 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4107 GA->getValueType(0), 4108 GA->getOffset()); 4109 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4110 4111 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4112 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 4113 PseudoSourceValue::getGOT(), 0); 4114 4115 // The address of the thread local variable is the add of the thread 4116 // pointer with the offset of the variable. 
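  // For the initial exec model the offset was loaded from the GOT above; for
  // local exec the wrapped @ntpoff value is used directly.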
4117 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4118} 4119 4120SDOperand 4121X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4122 // TODO: implement the "local dynamic" model 4123 // TODO: implement the "initial exec"model for pic executables 4124 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 4125 "TLS not implemented for non-ELF and 64-bit targets"); 4126 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4127 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 4128 // otherwise use the "Local Exec"TLS Model 4129 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4130 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 4131 else 4132 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4133} 4134 4135SDOperand 4136X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4137 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4138 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4139 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4140 // With PIC, the address is actually $g + Offset. 4141 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4142 !Subtarget->isPICStyleRIPRel()) { 4143 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4144 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4145 Result); 4146 } 4147 4148 return Result; 4149} 4150 4151SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4152 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4153 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4154 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4155 // With PIC, the address is actually $g + Offset. 4156 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4157 !Subtarget->isPICStyleRIPRel()) { 4158 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4159 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4160 Result); 4161 } 4162 4163 return Result; 4164} 4165 4166/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4167/// take a 2 x i32 value to shift plus a shift amount. 4168SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4169 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4170 MVT::ValueType VT = Op.getValueType(); 4171 unsigned VTBits = MVT::getSizeInBits(VT); 4172 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4173 SDOperand ShOpLo = Op.getOperand(0); 4174 SDOperand ShOpHi = Op.getOperand(1); 4175 SDOperand ShAmt = Op.getOperand(2); 4176 SDOperand Tmp1 = isSRA ? 4177 DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : 4178 DAG.getConstant(0, VT); 4179 4180 SDOperand Tmp2, Tmp3; 4181 if (Op.getOpcode() == ISD::SHL_PARTS) { 4182 Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); 4183 Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); 4184 } else { 4185 Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt); 4186 Tmp3 = DAG.getNode(isSRA ? 
ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4187 } 4188 4189 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4190 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4191 DAG.getConstant(VTBits, MVT::i8)); 4192 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4193 AndNode, DAG.getConstant(0, MVT::i8)); 4194 4195 SDOperand Hi, Lo; 4196 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4197 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4198 SmallVector<SDOperand, 4> Ops; 4199 if (Op.getOpcode() == ISD::SHL_PARTS) { 4200 Ops.push_back(Tmp2); 4201 Ops.push_back(Tmp3); 4202 Ops.push_back(CC); 4203 Ops.push_back(Cond); 4204 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4205 4206 Ops.clear(); 4207 Ops.push_back(Tmp3); 4208 Ops.push_back(Tmp1); 4209 Ops.push_back(CC); 4210 Ops.push_back(Cond); 4211 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4212 } else { 4213 Ops.push_back(Tmp2); 4214 Ops.push_back(Tmp3); 4215 Ops.push_back(CC); 4216 Ops.push_back(Cond); 4217 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4218 4219 Ops.clear(); 4220 Ops.push_back(Tmp3); 4221 Ops.push_back(Tmp1); 4222 Ops.push_back(CC); 4223 Ops.push_back(Cond); 4224 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4225 } 4226 4227 VTs = DAG.getNodeValueTypes(VT, VT); 4228 Ops.clear(); 4229 Ops.push_back(Lo); 4230 Ops.push_back(Hi); 4231 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4232} 4233 4234SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4235 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4236 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4237 "Unknown SINT_TO_FP to lower!"); 4238 4239 // These are really Legal; caller falls through into that case. 4240 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4241 return SDOperand(); 4242 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4243 Subtarget->is64Bit()) 4244 return SDOperand(); 4245 4246 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4247 MachineFunction &MF = DAG.getMachineFunction(); 4248 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4249 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4250 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4251 StackSlot, 4252 PseudoSourceValue::getFixedStack(), 4253 SSFI); 4254 4255 // Build the FILD 4256 SDVTList Tys; 4257 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4258 if (useSSE) 4259 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4260 else 4261 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4262 SmallVector<SDOperand, 8> Ops; 4263 Ops.push_back(Chain); 4264 Ops.push_back(StackSlot); 4265 Ops.push_back(DAG.getValueType(SrcVT)); 4266 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4267 Tys, &Ops[0], Ops.size()); 4268 4269 if (useSSE) { 4270 Chain = Result.getValue(1); 4271 SDOperand InFlag = Result.getValue(2); 4272 4273 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4274 // shouldn't be necessary except that RFP cannot be live across 4275 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
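    // Spill the x87 result to a fresh stack slot with FST, then reload it as
    // an SSE value of the requested type.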
4276 MachineFunction &MF = DAG.getMachineFunction(); 4277 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4278 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4279 Tys = DAG.getVTList(MVT::Other); 4280 SmallVector<SDOperand, 8> Ops; 4281 Ops.push_back(Chain); 4282 Ops.push_back(Result); 4283 Ops.push_back(StackSlot); 4284 Ops.push_back(DAG.getValueType(Op.getValueType())); 4285 Ops.push_back(InFlag); 4286 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4287 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4288 PseudoSourceValue::getFixedStack(), SSFI); 4289 } 4290 4291 return Result; 4292} 4293 4294std::pair<SDOperand,SDOperand> X86TargetLowering:: 4295FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4296 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4297 "Unknown FP_TO_SINT to lower!"); 4298 4299 // These are really Legal. 4300 if (Op.getValueType() == MVT::i32 && 4301 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4302 return std::make_pair(SDOperand(), SDOperand()); 4303 if (Subtarget->is64Bit() && 4304 Op.getValueType() == MVT::i64 && 4305 Op.getOperand(0).getValueType() != MVT::f80) 4306 return std::make_pair(SDOperand(), SDOperand()); 4307 4308 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4309 // stack slot. 4310 MachineFunction &MF = DAG.getMachineFunction(); 4311 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4312 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4313 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4314 unsigned Opc; 4315 switch (Op.getValueType()) { 4316 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4317 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4318 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4319 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4320 } 4321 4322 SDOperand Chain = DAG.getEntryNode(); 4323 SDOperand Value = Op.getOperand(0); 4324 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4325 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4326 Chain = DAG.getStore(Chain, Value, StackSlot, 4327 PseudoSourceValue::getFixedStack(), SSFI); 4328 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4329 SDOperand Ops[] = { 4330 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4331 }; 4332 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4333 Chain = Value.getValue(1); 4334 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4335 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4336 } 4337 4338 // Build the FP_TO_INT*_IN_MEM 4339 SDOperand Ops[] = { Chain, Value, StackSlot }; 4340 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4341 4342 return std::make_pair(FIST, StackSlot); 4343} 4344 4345SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4346 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4347 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4348 if (FIST.Val == 0) return SDOperand(); 4349 4350 // Load the result. 4351 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4352} 4353 4354SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4355 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4356 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4357 if (FIST.Val == 0) return 0; 4358 4359 // Return an i64 load from the stack slot. 
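  // FIST is the chain result of the FP_TO_INT*_IN_MEM node, so this load is
  // ordered after the conversion's store into the stack slot.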
4360 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4361 4362 // Use a MERGE_VALUES node to drop the chain result value. 4363 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4364} 4365 4366SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4367 MVT::ValueType VT = Op.getValueType(); 4368 MVT::ValueType EltVT = VT; 4369 if (MVT::isVector(VT)) 4370 EltVT = MVT::getVectorElementType(VT); 4371 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4372 std::vector<Constant*> CV; 4373 if (EltVT == MVT::f64) { 4374 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4375 CV.push_back(C); 4376 CV.push_back(C); 4377 } else { 4378 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4379 CV.push_back(C); 4380 CV.push_back(C); 4381 CV.push_back(C); 4382 CV.push_back(C); 4383 } 4384 Constant *C = ConstantVector::get(CV); 4385 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4386 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4387 PseudoSourceValue::getConstantPool(), 0, 4388 false, 16); 4389 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4390} 4391 4392SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4393 MVT::ValueType VT = Op.getValueType(); 4394 MVT::ValueType EltVT = VT; 4395 unsigned EltNum = 1; 4396 if (MVT::isVector(VT)) { 4397 EltVT = MVT::getVectorElementType(VT); 4398 EltNum = MVT::getVectorNumElements(VT); 4399 } 4400 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4401 std::vector<Constant*> CV; 4402 if (EltVT == MVT::f64) { 4403 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4404 CV.push_back(C); 4405 CV.push_back(C); 4406 } else { 4407 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4408 CV.push_back(C); 4409 CV.push_back(C); 4410 CV.push_back(C); 4411 CV.push_back(C); 4412 } 4413 Constant *C = ConstantVector::get(CV); 4414 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4415 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4416 PseudoSourceValue::getConstantPool(), 0, 4417 false, 16); 4418 if (MVT::isVector(VT)) { 4419 return DAG.getNode(ISD::BIT_CONVERT, VT, 4420 DAG.getNode(ISD::XOR, MVT::v2i64, 4421 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4422 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4423 } else { 4424 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4425 } 4426} 4427 4428SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4429 SDOperand Op0 = Op.getOperand(0); 4430 SDOperand Op1 = Op.getOperand(1); 4431 MVT::ValueType VT = Op.getValueType(); 4432 MVT::ValueType SrcVT = Op1.getValueType(); 4433 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4434 4435 // If second operand is smaller, extend it first. 4436 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4437 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4438 SrcVT = VT; 4439 SrcTy = MVT::getTypeForValueType(SrcVT); 4440 } 4441 // And if it is bigger, shrink it first. 4442 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4443 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4444 SrcVT = VT; 4445 SrcTy = MVT::getTypeForValueType(SrcVT); 4446 } 4447 4448 // At this point the operands and the result should have the same 4449 // type, and that won't be f80 since that is not custom lowered. 4450 4451 // First get the sign bit of second operand. 
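  // Build a constant-pool mask whose element 0 has only the sign bit set:
  // <0x8000000000000000, 0> for f64, <0x80000000, 0, 0, 0> for f32.
  // ANDing Op1 with it isolates the sign of the second operand.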
4452 std::vector<Constant*> CV; 4453 if (SrcVT == MVT::f64) { 4454 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4455 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4456 } else { 4457 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4458 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4459 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4460 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4461 } 4462 Constant *C = ConstantVector::get(CV); 4463 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4464 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4465 PseudoSourceValue::getConstantPool(), 0, 4466 false, 16); 4467 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4468 4469 // Shift sign bit right or left if the two operands have different types. 4470 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4471 // Op0 is MVT::f32, Op1 is MVT::f64. 4472 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4473 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4474 DAG.getConstant(32, MVT::i32)); 4475 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4476 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4477 DAG.getIntPtrConstant(0)); 4478 } 4479 4480 // Clear first operand sign bit. 4481 CV.clear(); 4482 if (VT == MVT::f64) { 4483 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4484 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4485 } else { 4486 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4487 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4488 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4489 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4490 } 4491 C = ConstantVector::get(CV); 4492 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4493 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4494 PseudoSourceValue::getConstantPool(), 0, 4495 false, 16); 4496 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4497 4498 // Or the value with the sign bit. 
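  // i.e. copysign(Op0, Op1) = (Op0 & ~sign_mask) | (Op1 & sign_mask).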
4499 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4500} 4501 4502SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4503 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4504 SDOperand Cond; 4505 SDOperand Op0 = Op.getOperand(0); 4506 SDOperand Op1 = Op.getOperand(1); 4507 SDOperand CC = Op.getOperand(2); 4508 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4509 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4510 unsigned X86CC; 4511 4512 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4513 Op0, Op1, DAG)) { 4514 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4515 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4516 DAG.getConstant(X86CC, MVT::i8), Cond); 4517 } 4518 4519 assert(isFP && "Illegal integer SetCC!"); 4520 4521 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4522 switch (SetCCOpcode) { 4523 default: assert(false && "Illegal floating point SetCC!"); 4524 case ISD::SETOEQ: { // !PF & ZF 4525 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4526 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4527 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4528 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4529 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4530 } 4531 case ISD::SETUNE: { // PF | !ZF 4532 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4533 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4534 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4535 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4536 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4537 } 4538 } 4539} 4540 4541 4542SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4543 bool addTest = true; 4544 SDOperand Cond = Op.getOperand(0); 4545 SDOperand CC; 4546 4547 if (Cond.getOpcode() == ISD::SETCC) 4548 Cond = LowerSETCC(Cond, DAG); 4549 4550 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4551 // setting operand in place of the X86ISD::SETCC. 4552 if (Cond.getOpcode() == X86ISD::SETCC) { 4553 CC = Cond.getOperand(0); 4554 4555 SDOperand Cmp = Cond.getOperand(1); 4556 unsigned Opc = Cmp.getOpcode(); 4557 MVT::ValueType VT = Op.getValueType(); 4558 4559 bool IllegalFPCMov = false; 4560 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4561 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4562 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4563 4564 if ((Opc == X86ISD::CMP || 4565 Opc == X86ISD::COMI || 4566 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4567 Cond = Cmp; 4568 addTest = false; 4569 } 4570 } 4571 4572 if (addTest) { 4573 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4574 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4575 } 4576 4577 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4578 MVT::Flag); 4579 SmallVector<SDOperand, 4> Ops; 4580 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4581 // condition is true. 
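  // The operands pushed below are (FalseVal, TrueVal, CondCode, Flags): the
  // node produces its second operand when the condition holds and its first
  // operand otherwise.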
4582 Ops.push_back(Op.getOperand(2)); 4583 Ops.push_back(Op.getOperand(1)); 4584 Ops.push_back(CC); 4585 Ops.push_back(Cond); 4586 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4587} 4588 4589SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4590 bool addTest = true; 4591 SDOperand Chain = Op.getOperand(0); 4592 SDOperand Cond = Op.getOperand(1); 4593 SDOperand Dest = Op.getOperand(2); 4594 SDOperand CC; 4595 4596 if (Cond.getOpcode() == ISD::SETCC) 4597 Cond = LowerSETCC(Cond, DAG); 4598 4599 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4600 // setting operand in place of the X86ISD::SETCC. 4601 if (Cond.getOpcode() == X86ISD::SETCC) { 4602 CC = Cond.getOperand(0); 4603 4604 SDOperand Cmp = Cond.getOperand(1); 4605 unsigned Opc = Cmp.getOpcode(); 4606 if (Opc == X86ISD::CMP || 4607 Opc == X86ISD::COMI || 4608 Opc == X86ISD::UCOMI) { 4609 Cond = Cmp; 4610 addTest = false; 4611 } 4612 } 4613 4614 if (addTest) { 4615 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4616 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4617 } 4618 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4619 Chain, Op.getOperand(2), CC, Cond); 4620} 4621 4622 4623// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4624// Calls to _alloca is needed to probe the stack when allocating more than 4k 4625// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4626// that the guard pages used by the OS virtual memory manager are allocated in 4627// correct sequence. 4628SDOperand 4629X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4630 SelectionDAG &DAG) { 4631 assert(Subtarget->isTargetCygMing() && 4632 "This should be used only on Cygwin/Mingw targets"); 4633 4634 // Get the inputs. 4635 SDOperand Chain = Op.getOperand(0); 4636 SDOperand Size = Op.getOperand(1); 4637 // FIXME: Ensure alignment here 4638 4639 SDOperand Flag; 4640 4641 MVT::ValueType IntPtr = getPointerTy(); 4642 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 4643 4644 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4645 Flag = Chain.getValue(1); 4646 4647 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4648 SDOperand Ops[] = { Chain, 4649 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4650 DAG.getRegister(X86::EAX, IntPtr), 4651 Flag }; 4652 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4653 Flag = Chain.getValue(1); 4654 4655 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4656 4657 std::vector<MVT::ValueType> Tys; 4658 Tys.push_back(SPTy); 4659 Tys.push_back(MVT::Other); 4660 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4661 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4662} 4663 4664SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4665 SDOperand InFlag(0, 0); 4666 SDOperand Chain = Op.getOperand(0); 4667 unsigned Align = 4668 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4669 if (Align == 0) Align = 1; 4670 4671 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4672 // If not DWORD aligned or size is more than the threshold, call memset. 4673 // The libc version is likely to be faster for these cases. It can use the 4674 // address value and run time information about the CPU. 
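  // Otherwise the set is emitted inline as rep;stos, using an element width
  // chosen from the alignment when the fill value is a constant (plain
  // rep;stosb when it is not), followed either by a second rep;stosb for a
  // non-constant remainder or by explicit stores of the last 1 - 7 bytes.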
4675 if ((Align & 3) != 0 || 4676 (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) { 4677 MVT::ValueType IntPtr = getPointerTy(); 4678 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4679 TargetLowering::ArgListTy Args; 4680 TargetLowering::ArgListEntry Entry; 4681 Entry.Node = Op.getOperand(1); 4682 Entry.Ty = IntPtrTy; 4683 Args.push_back(Entry); 4684 // Extend the unsigned i8 argument to be an int value for the call. 4685 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4686 Entry.Ty = IntPtrTy; 4687 Args.push_back(Entry); 4688 Entry.Node = Op.getOperand(3); 4689 Args.push_back(Entry); 4690 std::pair<SDOperand,SDOperand> CallResult = 4691 LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, 4692 false, DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4693 return CallResult.second; 4694 } 4695 4696 MVT::ValueType AVT; 4697 SDOperand Count; 4698 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4699 unsigned BytesLeft = 0; 4700 bool TwoRepStos = false; 4701 if (ValC) { 4702 unsigned ValReg; 4703 uint64_t Val = ValC->getValue() & 255; 4704 4705 // If the value is a constant, then we can potentially use larger sets. 4706 switch (Align & 3) { 4707 case 2: // WORD aligned 4708 AVT = MVT::i16; 4709 ValReg = X86::AX; 4710 Val = (Val << 8) | Val; 4711 break; 4712 case 0: // DWORD aligned 4713 AVT = MVT::i32; 4714 ValReg = X86::EAX; 4715 Val = (Val << 8) | Val; 4716 Val = (Val << 16) | Val; 4717 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 4718 AVT = MVT::i64; 4719 ValReg = X86::RAX; 4720 Val = (Val << 32) | Val; 4721 } 4722 break; 4723 default: // Byte aligned 4724 AVT = MVT::i8; 4725 ValReg = X86::AL; 4726 Count = Op.getOperand(3); 4727 break; 4728 } 4729 4730 if (AVT > MVT::i8) { 4731 if (I) { 4732 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4733 Count = DAG.getIntPtrConstant(I->getValue() / UBytes); 4734 BytesLeft = I->getValue() % UBytes; 4735 } else { 4736 assert(AVT >= MVT::i32 && 4737 "Do not use rep;stos if not at least DWORD aligned"); 4738 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4739 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4740 TwoRepStos = true; 4741 } 4742 } 4743 4744 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4745 InFlag); 4746 InFlag = Chain.getValue(1); 4747 } else { 4748 AVT = MVT::i8; 4749 Count = Op.getOperand(3); 4750 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4751 InFlag = Chain.getValue(1); 4752 } 4753 4754 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4755 Count, InFlag); 4756 InFlag = Chain.getValue(1); 4757 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4758 Op.getOperand(1), InFlag); 4759 InFlag = Chain.getValue(1); 4760 4761 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4762 SmallVector<SDOperand, 8> Ops; 4763 Ops.push_back(Chain); 4764 Ops.push_back(DAG.getValueType(AVT)); 4765 Ops.push_back(InFlag); 4766 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4767 4768 if (TwoRepStos) { 4769 InFlag = Chain.getValue(1); 4770 Count = Op.getOperand(3); 4771 MVT::ValueType CVT = Count.getValueType(); 4772 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4773 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4774 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, 4775 Left, InFlag); 4776 InFlag = Chain.getValue(1); 4777 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4778 Ops.clear(); 4779 Ops.push_back(Chain); 4780 Ops.push_back(DAG.getValueType(MVT::i8)); 4781 Ops.push_back(InFlag); 4782 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4783 } else if (BytesLeft) { 4784 // Issue stores for the last 1 - 7 bytes. 4785 SDOperand Value; 4786 unsigned Val = ValC->getValue() & 255; 4787 unsigned Offset = I->getValue() - BytesLeft; 4788 SDOperand DstAddr = Op.getOperand(1); 4789 MVT::ValueType AddrVT = DstAddr.getValueType(); 4790 if (BytesLeft >= 4) { 4791 Val = (Val << 8) | Val; 4792 Val = (Val << 16) | Val; 4793 Value = DAG.getConstant(Val, MVT::i32); 4794 Chain = DAG.getStore(Chain, Value, 4795 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4796 DAG.getConstant(Offset, AddrVT)), 4797 NULL, 0); 4798 BytesLeft -= 4; 4799 Offset += 4; 4800 } 4801 if (BytesLeft >= 2) { 4802 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4803 Chain = DAG.getStore(Chain, Value, 4804 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4805 DAG.getConstant(Offset, AddrVT)), 4806 NULL, 0); 4807 BytesLeft -= 2; 4808 Offset += 2; 4809 } 4810 if (BytesLeft == 1) { 4811 Value = DAG.getConstant(Val, MVT::i8); 4812 Chain = DAG.getStore(Chain, Value, 4813 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4814 DAG.getConstant(Offset, AddrVT)), 4815 NULL, 0); 4816 } 4817 } 4818 4819 return Chain; 4820} 4821 4822SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4823 SDOperand Dest, 4824 SDOperand Source, 4825 unsigned Size, 4826 unsigned Align, 4827 SelectionDAG &DAG) { 4828 MVT::ValueType AVT; 4829 unsigned BytesLeft = 0; 4830 switch (Align & 3) { 4831 case 2: // WORD aligned 4832 AVT = MVT::i16; 4833 break; 4834 case 0: // DWORD aligned 4835 AVT = MVT::i32; 4836 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4837 AVT = MVT::i64; 4838 break; 4839 default: // Byte aligned 4840 AVT = MVT::i8; 4841 break; 4842 } 4843 4844 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4845 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4846 BytesLeft = Size % UBytes; 4847 4848 SDOperand InFlag(0, 0); 4849 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4850 Count, InFlag); 4851 InFlag = Chain.getValue(1); 4852 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4853 Dest, InFlag); 4854 InFlag = Chain.getValue(1); 4855 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4856 Source, InFlag); 4857 InFlag = Chain.getValue(1); 4858 4859 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4860 SmallVector<SDOperand, 8> Ops; 4861 Ops.push_back(Chain); 4862 Ops.push_back(DAG.getValueType(AVT)); 4863 Ops.push_back(InFlag); 4864 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4865 4866 if (BytesLeft) { 4867 // Issue loads and stores for the last 1 - 7 bytes. 
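    // For example, with AVT == MVT::i32 a 15-byte copy runs rep;movs with
    // Count = 3 (12 bytes) and then emits a 2-byte and a 1-byte load/store
    // pair here for the tail.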
4868 unsigned Offset = Size - BytesLeft; 4869 SDOperand DstAddr = Dest; 4870 MVT::ValueType DstVT = DstAddr.getValueType(); 4871 SDOperand SrcAddr = Source; 4872 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4873 SDOperand Value; 4874 if (BytesLeft >= 4) { 4875 Value = DAG.getLoad(MVT::i32, Chain, 4876 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4877 DAG.getConstant(Offset, SrcVT)), 4878 NULL, 0); 4879 Chain = Value.getValue(1); 4880 Chain = DAG.getStore(Chain, Value, 4881 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4882 DAG.getConstant(Offset, DstVT)), 4883 NULL, 0); 4884 BytesLeft -= 4; 4885 Offset += 4; 4886 } 4887 if (BytesLeft >= 2) { 4888 Value = DAG.getLoad(MVT::i16, Chain, 4889 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4890 DAG.getConstant(Offset, SrcVT)), 4891 NULL, 0); 4892 Chain = Value.getValue(1); 4893 Chain = DAG.getStore(Chain, Value, 4894 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4895 DAG.getConstant(Offset, DstVT)), 4896 NULL, 0); 4897 BytesLeft -= 2; 4898 Offset += 2; 4899 } 4900 4901 if (BytesLeft == 1) { 4902 Value = DAG.getLoad(MVT::i8, Chain, 4903 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4904 DAG.getConstant(Offset, SrcVT)), 4905 NULL, 0); 4906 Chain = Value.getValue(1); 4907 Chain = DAG.getStore(Chain, Value, 4908 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4909 DAG.getConstant(Offset, DstVT)), 4910 NULL, 0); 4911 } 4912 } 4913 4914 return Chain; 4915} 4916 4917/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4918SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4919 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4920 SDOperand TheChain = N->getOperand(0); 4921 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4922 if (Subtarget->is64Bit()) { 4923 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4924 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4925 MVT::i64, rax.getValue(2)); 4926 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4927 DAG.getConstant(32, MVT::i8)); 4928 SDOperand Ops[] = { 4929 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4930 }; 4931 4932 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4933 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4934 } 4935 4936 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4937 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4938 MVT::i32, eax.getValue(2)); 4939 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4940 SDOperand Ops[] = { eax, edx }; 4941 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4942 4943 // Use a MERGE_VALUES to return the value and chain. 4944 Ops[1] = edx.getValue(1); 4945 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4946 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4947} 4948 4949SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4950 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4951 4952 if (!Subtarget->is64Bit()) { 4953 // vastart just stores the address of the VarArgsFrameIndex slot into the 4954 // memory location argument. 4955 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4956 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4957 } 4958 4959 // __va_list_tag: 4960 // gp_offset (0 - 6 * 8) 4961 // fp_offset (48 - 48 + 8 * 16) 4962 // overflow_arg_area (point to parameters coming in memory). 
4963 // reg_save_area 4964 SmallVector<SDOperand, 8> MemOps; 4965 SDOperand FIN = Op.getOperand(1); 4966 // Store gp_offset 4967 SDOperand Store = DAG.getStore(Op.getOperand(0), 4968 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4969 FIN, SV, 0); 4970 MemOps.push_back(Store); 4971 4972 // Store fp_offset 4973 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4974 Store = DAG.getStore(Op.getOperand(0), 4975 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4976 FIN, SV, 0); 4977 MemOps.push_back(Store); 4978 4979 // Store ptr to overflow_arg_area 4980 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4981 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4982 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4983 MemOps.push_back(Store); 4984 4985 // Store ptr to reg_save_area. 4986 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4987 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4988 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4989 MemOps.push_back(Store); 4990 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4991} 4992 4993SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4994 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4995 SDOperand Chain = Op.getOperand(0); 4996 SDOperand DstPtr = Op.getOperand(1); 4997 SDOperand SrcPtr = Op.getOperand(2); 4998 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4999 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5000 5001 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 5002 Chain = SrcPtr.getValue(1); 5003 for (unsigned i = 0; i < 3; ++i) { 5004 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 5005 Chain = Val.getValue(1); 5006 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 5007 if (i == 2) 5008 break; 5009 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 5010 DAG.getIntPtrConstant(8)); 5011 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 5012 DAG.getIntPtrConstant(8)); 5013 } 5014 return Chain; 5015} 5016 5017SDOperand 5018X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 5019 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 5020 switch (IntNo) { 5021 default: return SDOperand(); // Don't custom lower most intrinsics. 5022 // Comparison intrinsics. 
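  // Each of the (u)comi{eq,lt,le,gt,ge,neq} intrinsics below is lowered to an
  // X86ISD::COMI or X86ISD::UCOMI compare followed by an X86ISD::SETCC on the
  // matching condition code, any-extended back to the i32 result type.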
5023 case Intrinsic::x86_sse_comieq_ss: 5024 case Intrinsic::x86_sse_comilt_ss: 5025 case Intrinsic::x86_sse_comile_ss: 5026 case Intrinsic::x86_sse_comigt_ss: 5027 case Intrinsic::x86_sse_comige_ss: 5028 case Intrinsic::x86_sse_comineq_ss: 5029 case Intrinsic::x86_sse_ucomieq_ss: 5030 case Intrinsic::x86_sse_ucomilt_ss: 5031 case Intrinsic::x86_sse_ucomile_ss: 5032 case Intrinsic::x86_sse_ucomigt_ss: 5033 case Intrinsic::x86_sse_ucomige_ss: 5034 case Intrinsic::x86_sse_ucomineq_ss: 5035 case Intrinsic::x86_sse2_comieq_sd: 5036 case Intrinsic::x86_sse2_comilt_sd: 5037 case Intrinsic::x86_sse2_comile_sd: 5038 case Intrinsic::x86_sse2_comigt_sd: 5039 case Intrinsic::x86_sse2_comige_sd: 5040 case Intrinsic::x86_sse2_comineq_sd: 5041 case Intrinsic::x86_sse2_ucomieq_sd: 5042 case Intrinsic::x86_sse2_ucomilt_sd: 5043 case Intrinsic::x86_sse2_ucomile_sd: 5044 case Intrinsic::x86_sse2_ucomigt_sd: 5045 case Intrinsic::x86_sse2_ucomige_sd: 5046 case Intrinsic::x86_sse2_ucomineq_sd: { 5047 unsigned Opc = 0; 5048 ISD::CondCode CC = ISD::SETCC_INVALID; 5049 switch (IntNo) { 5050 default: break; 5051 case Intrinsic::x86_sse_comieq_ss: 5052 case Intrinsic::x86_sse2_comieq_sd: 5053 Opc = X86ISD::COMI; 5054 CC = ISD::SETEQ; 5055 break; 5056 case Intrinsic::x86_sse_comilt_ss: 5057 case Intrinsic::x86_sse2_comilt_sd: 5058 Opc = X86ISD::COMI; 5059 CC = ISD::SETLT; 5060 break; 5061 case Intrinsic::x86_sse_comile_ss: 5062 case Intrinsic::x86_sse2_comile_sd: 5063 Opc = X86ISD::COMI; 5064 CC = ISD::SETLE; 5065 break; 5066 case Intrinsic::x86_sse_comigt_ss: 5067 case Intrinsic::x86_sse2_comigt_sd: 5068 Opc = X86ISD::COMI; 5069 CC = ISD::SETGT; 5070 break; 5071 case Intrinsic::x86_sse_comige_ss: 5072 case Intrinsic::x86_sse2_comige_sd: 5073 Opc = X86ISD::COMI; 5074 CC = ISD::SETGE; 5075 break; 5076 case Intrinsic::x86_sse_comineq_ss: 5077 case Intrinsic::x86_sse2_comineq_sd: 5078 Opc = X86ISD::COMI; 5079 CC = ISD::SETNE; 5080 break; 5081 case Intrinsic::x86_sse_ucomieq_ss: 5082 case Intrinsic::x86_sse2_ucomieq_sd: 5083 Opc = X86ISD::UCOMI; 5084 CC = ISD::SETEQ; 5085 break; 5086 case Intrinsic::x86_sse_ucomilt_ss: 5087 case Intrinsic::x86_sse2_ucomilt_sd: 5088 Opc = X86ISD::UCOMI; 5089 CC = ISD::SETLT; 5090 break; 5091 case Intrinsic::x86_sse_ucomile_ss: 5092 case Intrinsic::x86_sse2_ucomile_sd: 5093 Opc = X86ISD::UCOMI; 5094 CC = ISD::SETLE; 5095 break; 5096 case Intrinsic::x86_sse_ucomigt_ss: 5097 case Intrinsic::x86_sse2_ucomigt_sd: 5098 Opc = X86ISD::UCOMI; 5099 CC = ISD::SETGT; 5100 break; 5101 case Intrinsic::x86_sse_ucomige_ss: 5102 case Intrinsic::x86_sse2_ucomige_sd: 5103 Opc = X86ISD::UCOMI; 5104 CC = ISD::SETGE; 5105 break; 5106 case Intrinsic::x86_sse_ucomineq_ss: 5107 case Intrinsic::x86_sse2_ucomineq_sd: 5108 Opc = X86ISD::UCOMI; 5109 CC = ISD::SETNE; 5110 break; 5111 } 5112 5113 unsigned X86CC; 5114 SDOperand LHS = Op.getOperand(1); 5115 SDOperand RHS = Op.getOperand(2); 5116 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5117 5118 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5119 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5120 DAG.getConstant(X86CC, MVT::i8), Cond); 5121 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5122 } 5123 } 5124} 5125 5126SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5127 // Depths > 0 not supported yet! 
5128 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5129 return SDOperand(); 5130 5131 // Just load the return address 5132 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5133 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5134} 5135 5136SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5137 // Depths > 0 not supported yet! 5138 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5139 return SDOperand(); 5140 5141 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5142 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5143 DAG.getIntPtrConstant(4)); 5144} 5145 5146SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5147 SelectionDAG &DAG) { 5148 // Is not yet supported on x86-64 5149 if (Subtarget->is64Bit()) 5150 return SDOperand(); 5151 5152 return DAG.getIntPtrConstant(8); 5153} 5154 5155SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5156{ 5157 assert(!Subtarget->is64Bit() && 5158 "Lowering of eh_return builtin is not supported yet on x86-64"); 5159 5160 MachineFunction &MF = DAG.getMachineFunction(); 5161 SDOperand Chain = Op.getOperand(0); 5162 SDOperand Offset = Op.getOperand(1); 5163 SDOperand Handler = Op.getOperand(2); 5164 5165 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5166 getPointerTy()); 5167 5168 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5169 DAG.getIntPtrConstant(-4UL)); 5170 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5171 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5172 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5173 MF.getRegInfo().addLiveOut(X86::ECX); 5174 5175 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5176 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5177} 5178 5179SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5180 SelectionDAG &DAG) { 5181 SDOperand Root = Op.getOperand(0); 5182 SDOperand Trmp = Op.getOperand(1); // trampoline 5183 SDOperand FPtr = Op.getOperand(2); // nested function 5184 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5185 5186 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5187 5188 const X86InstrInfo *TII = 5189 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5190 5191 if (Subtarget->is64Bit()) { 5192 SDOperand OutChains[6]; 5193 5194 // Large code-model. 5195 5196 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5197 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5198 5199 const unsigned char N86R10 = 5200 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5201 const unsigned char N86R11 = 5202 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5203 5204 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5205 5206 // Load the pointer to the nested function into R11. 5207 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5208 SDOperand Addr = Trmp; 5209 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5210 TrmpAddr, 0); 5211 5212 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5213 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5214 5215 // Load the 'nest' parameter value into R10. 
5216 // R10 is specified in X86CallingConv.td 5217 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5218 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5219 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5220 TrmpAddr, 10); 5221 5222 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5223 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5224 5225 // Jump to the nested function. 5226 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5227 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5228 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5229 TrmpAddr, 20); 5230 5231 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5232 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5233 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5234 TrmpAddr, 22); 5235 5236 SDOperand Ops[] = 5237 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5238 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5239 } else { 5240 const Function *Func = 5241 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5242 unsigned CC = Func->getCallingConv(); 5243 unsigned NestReg; 5244 5245 switch (CC) { 5246 default: 5247 assert(0 && "Unsupported calling convention"); 5248 case CallingConv::C: 5249 case CallingConv::X86_StdCall: { 5250 // Pass 'nest' parameter in ECX. 5251 // Must be kept in sync with X86CallingConv.td 5252 NestReg = X86::ECX; 5253 5254 // Check that ECX wasn't needed by an 'inreg' parameter. 5255 const FunctionType *FTy = Func->getFunctionType(); 5256 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5257 5258 if (Attrs && !Func->isVarArg()) { 5259 unsigned InRegCount = 0; 5260 unsigned Idx = 1; 5261 5262 for (FunctionType::param_iterator I = FTy->param_begin(), 5263 E = FTy->param_end(); I != E; ++I, ++Idx) 5264 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5265 // FIXME: should only count parameters that are lowered to integers. 5266 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5267 5268 if (InRegCount > 2) { 5269 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5270 abort(); 5271 } 5272 } 5273 break; 5274 } 5275 case CallingConv::X86_FastCall: 5276 // Pass 'nest' parameter in EAX. 
5277 // Must be kept in sync with X86CallingConv.td 5278 NestReg = X86::EAX; 5279 break; 5280 } 5281 5282 SDOperand OutChains[4]; 5283 SDOperand Addr, Disp; 5284 5285 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5286 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5287 5288 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5289 const unsigned char N86Reg = 5290 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg); 5291 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5292 Trmp, TrmpAddr, 0); 5293 5294 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5295 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1); 5296 5297 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5298 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5299 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5300 TrmpAddr, 5, false, 1); 5301 5302 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5303 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); 5304 5305 SDOperand Ops[] = 5306 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5307 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5308 } 5309} 5310 5311SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 5312 /* 5313 The rounding mode is in bits 11:10 of FPSR, and has the following 5314 settings: 5315 00 Round to nearest 5316 01 Round to -inf 5317 10 Round to +inf 5318 11 Round to 0 5319 5320 FLT_ROUNDS, on the other hand, expects the following: 5321 -1 Undefined 5322 0 Round to 0 5323 1 Round to nearest 5324 2 Round to +inf 5325 3 Round to -inf 5326 5327 To perform the conversion, we do: 5328 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5329 */ 5330 5331 MachineFunction &MF = DAG.getMachineFunction(); 5332 const TargetMachine &TM = MF.getTarget(); 5333 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5334 unsigned StackAlignment = TFI.getStackAlignment(); 5335 MVT::ValueType VT = Op.getValueType(); 5336 5337 // Save FP Control Word to stack slot 5338 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5339 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5340 5341 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5342 DAG.getEntryNode(), StackSlot); 5343 5344 // Load FP Control Word from stack slot 5345 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5346 5347 // Transform as necessary 5348 SDOperand CWD1 = 5349 DAG.getNode(ISD::SRL, MVT::i16, 5350 DAG.getNode(ISD::AND, MVT::i16, 5351 CWD, DAG.getConstant(0x800, MVT::i16)), 5352 DAG.getConstant(11, MVT::i8)); 5353 SDOperand CWD2 = 5354 DAG.getNode(ISD::SRL, MVT::i16, 5355 DAG.getNode(ISD::AND, MVT::i16, 5356 CWD, DAG.getConstant(0x400, MVT::i16)), 5357 DAG.getConstant(9, MVT::i8)); 5358 5359 SDOperand RetVal = 5360 DAG.getNode(ISD::AND, MVT::i16, 5361 DAG.getNode(ISD::ADD, MVT::i16, 5362 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5363 DAG.getConstant(1, MVT::i16)), 5364 DAG.getConstant(3, MVT::i16)); 5365 5366 5367 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 
5368 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5369} 5370 5371SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5372 MVT::ValueType VT = Op.getValueType(); 5373 MVT::ValueType OpVT = VT; 5374 unsigned NumBits = MVT::getSizeInBits(VT); 5375 5376 Op = Op.getOperand(0); 5377 if (VT == MVT::i8) { 5378 // Zero extend to i32 since there is not an i8 bsr. 5379 OpVT = MVT::i32; 5380 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5381 } 5382 5383 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5384 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5385 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5386 5387 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5388 SmallVector<SDOperand, 4> Ops; 5389 Ops.push_back(Op); 5390 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5391 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5392 Ops.push_back(Op.getValue(1)); 5393 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5394 5395 // Finally xor with NumBits-1. 5396 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5397 5398 if (VT == MVT::i8) 5399 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5400 return Op; 5401} 5402 5403SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5404 MVT::ValueType VT = Op.getValueType(); 5405 MVT::ValueType OpVT = VT; 5406 unsigned NumBits = MVT::getSizeInBits(VT); 5407 5408 Op = Op.getOperand(0); 5409 if (VT == MVT::i8) { 5410 OpVT = MVT::i32; 5411 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5412 } 5413 5414 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5415 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5416 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5417 5418 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5419 SmallVector<SDOperand, 4> Ops; 5420 Ops.push_back(Op); 5421 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5422 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5423 Ops.push_back(Op.getValue(1)); 5424 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5425 5426 if (VT == MVT::i8) 5427 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5428 return Op; 5429} 5430 5431SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5432 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5433 unsigned Reg = 0; 5434 unsigned size = 0; 5435 switch(T) { 5436 case MVT::i8: Reg = X86::AL; size = 1; break; 5437 case MVT::i16: Reg = X86::AX; size = 2; break; 5438 case MVT::i32: Reg = X86::EAX; size = 4; break; 5439 case MVT::i64: 5440 if (Subtarget->is64Bit()) { 5441 Reg = X86::RAX; size = 8; 5442 } else //Should go away when LowerType stuff lands 5443 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5444 break; 5445 }; 5446 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5447 Op.getOperand(3), SDOperand()); 5448 SDOperand Ops[] = { cpIn.getValue(0), 5449 Op.getOperand(1), 5450 Op.getOperand(2), 5451 DAG.getTargetConstant(size, MVT::i8), 5452 cpIn.getValue(1) }; 5453 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5454 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5455 SDOperand cpOut = 5456 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5457 return cpOut; 5458} 5459 5460SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5461 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5462 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5463 SDOperand cpInL, cpInH; 5464 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5465 DAG.getConstant(0, 
MVT::i32)); 5466 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5467 DAG.getConstant(1, MVT::i32)); 5468 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5469 cpInL, SDOperand()); 5470 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5471 cpInH, cpInL.getValue(1)); 5472 SDOperand swapInL, swapInH; 5473 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5474 DAG.getConstant(0, MVT::i32)); 5475 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5476 DAG.getConstant(1, MVT::i32)); 5477 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5478 swapInL, cpInH.getValue(1)); 5479 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5480 swapInH, swapInL.getValue(1)); 5481 SDOperand Ops[] = { swapInH.getValue(0), 5482 Op->getOperand(1), 5483 swapInH.getValue(1)}; 5484 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5485 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5486 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5487 Result.getValue(1)); 5488 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5489 cpOutL.getValue(2)); 5490 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5491 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5492 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5493 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5494} 5495 5496/// LowerOperation - Provide custom lowering hooks for some operations. 5497/// 5498SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5499 switch (Op.getOpcode()) { 5500 default: assert(0 && "Should not custom lower this!"); 5501 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5502 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5503 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5504 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5505 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5506 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5507 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5508 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5509 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5510 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5511 case ISD::SHL_PARTS: 5512 case ISD::SRA_PARTS: 5513 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5514 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5515 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5516 case ISD::FABS: return LowerFABS(Op, DAG); 5517 case ISD::FNEG: return LowerFNEG(Op, DAG); 5518 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5519 case ISD::SETCC: return LowerSETCC(Op, DAG); 5520 case ISD::SELECT: return LowerSELECT(Op, DAG); 5521 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5522 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5523 case ISD::CALL: return LowerCALL(Op, DAG); 5524 case ISD::RET: return LowerRET(Op, DAG); 5525 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5526 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5527 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5528 case ISD::VASTART: return LowerVASTART(Op, DAG); 5529 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5530 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5531 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5532 case ISD::FRAMEADDR: 
return LowerFRAMEADDR(Op, DAG); 5533 case ISD::FRAME_TO_ARGS_OFFSET: 5534 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5535 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5536 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5537 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5538 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5539 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5540 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5541 5542 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5543 case ISD::READCYCLECOUNTER: 5544 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5545 } 5546} 5547 5548/// ExpandOperation - Provide custom lowering hooks for expanding operations. 5549SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5550 switch (N->getOpcode()) { 5551 default: assert(0 && "Should not custom lower this!"); 5552 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5553 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5554 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5555 } 5556} 5557 5558const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5559 switch (Opcode) { 5560 default: return NULL; 5561 case X86ISD::BSF: return "X86ISD::BSF"; 5562 case X86ISD::BSR: return "X86ISD::BSR"; 5563 case X86ISD::SHLD: return "X86ISD::SHLD"; 5564 case X86ISD::SHRD: return "X86ISD::SHRD"; 5565 case X86ISD::FAND: return "X86ISD::FAND"; 5566 case X86ISD::FOR: return "X86ISD::FOR"; 5567 case X86ISD::FXOR: return "X86ISD::FXOR"; 5568 case X86ISD::FSRL: return "X86ISD::FSRL"; 5569 case X86ISD::FILD: return "X86ISD::FILD"; 5570 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5571 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5572 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5573 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5574 case X86ISD::FLD: return "X86ISD::FLD"; 5575 case X86ISD::FST: return "X86ISD::FST"; 5576 case X86ISD::FP_GET_ST0: return "X86ISD::FP_GET_ST0"; 5577 case X86ISD::FP_GET_ST0_ST1: return "X86ISD::FP_GET_ST0_ST1"; 5578 case X86ISD::FP_SET_ST0: return "X86ISD::FP_SET_ST0"; 5579 case X86ISD::CALL: return "X86ISD::CALL"; 5580 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5581 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5582 case X86ISD::CMP: return "X86ISD::CMP"; 5583 case X86ISD::COMI: return "X86ISD::COMI"; 5584 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5585 case X86ISD::SETCC: return "X86ISD::SETCC"; 5586 case X86ISD::CMOV: return "X86ISD::CMOV"; 5587 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5588 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5589 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5590 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5591 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5592 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5593 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5594 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5595 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5596 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5597 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5598 case X86ISD::FMAX: return "X86ISD::FMAX"; 5599 case X86ISD::FMIN: return "X86ISD::FMIN"; 5600 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5601 case X86ISD::FRCP: return "X86ISD::FRCP"; 5602 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5603 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5604 case X86ISD::EH_RETURN: return 
"X86ISD::EH_RETURN"; 5605 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5606 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5607 case X86ISD::LCMPXCHG_DAG: return "x86ISD::LCMPXCHG_DAG"; 5608 case X86ISD::LCMPXCHG8_DAG: return "x86ISD::LCMPXCHG8_DAG"; 5609 } 5610} 5611 5612// isLegalAddressingMode - Return true if the addressing mode represented 5613// by AM is legal for this target, for a load/store of the specified type. 5614bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5615 const Type *Ty) const { 5616 // X86 supports extremely general addressing modes. 5617 5618 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5619 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5620 return false; 5621 5622 if (AM.BaseGV) { 5623 // We can only fold this if we don't need an extra load. 5624 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5625 return false; 5626 5627 // X86-64 only supports addr of globals in small code model. 5628 if (Subtarget->is64Bit()) { 5629 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5630 return false; 5631 // If lower 4G is not available, then we must use rip-relative addressing. 5632 if (AM.BaseOffs || AM.Scale > 1) 5633 return false; 5634 } 5635 } 5636 5637 switch (AM.Scale) { 5638 case 0: 5639 case 1: 5640 case 2: 5641 case 4: 5642 case 8: 5643 // These scales always work. 5644 break; 5645 case 3: 5646 case 5: 5647 case 9: 5648 // These scales are formed with basereg+scalereg. Only accept if there is 5649 // no basereg yet. 5650 if (AM.HasBaseReg) 5651 return false; 5652 break; 5653 default: // Other stuff never works. 5654 return false; 5655 } 5656 5657 return true; 5658} 5659 5660 5661bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5662 if (!Ty1->isInteger() || !Ty2->isInteger()) 5663 return false; 5664 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5665 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5666 if (NumBits1 <= NumBits2) 5667 return false; 5668 return Subtarget->is64Bit() || NumBits1 < 64; 5669} 5670 5671bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5672 MVT::ValueType VT2) const { 5673 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5674 return false; 5675 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5676 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5677 if (NumBits1 <= NumBits2) 5678 return false; 5679 return Subtarget->is64Bit() || NumBits1 < 64; 5680} 5681 5682/// isShuffleMaskLegal - Targets can use this to indicate that they only 5683/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5684/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5685/// are assumed to be legal. 5686bool 5687X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5688 // Only do shuffles on 128-bit vector types for now. 
5689 if (MVT::getSizeInBits(VT) == 64) return false; 5690 return (Mask.Val->getNumOperands() <= 4 || 5691 isIdentityMask(Mask.Val) || 5692 isIdentityMask(Mask.Val, true) || 5693 isSplatMask(Mask.Val) || 5694 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5695 X86::isUNPCKLMask(Mask.Val) || 5696 X86::isUNPCKHMask(Mask.Val) || 5697 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5698 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5699} 5700 5701bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5702 MVT::ValueType EVT, 5703 SelectionDAG &DAG) const { 5704 unsigned NumElts = BVOps.size(); 5705 // Only do shuffles on 128-bit vector types for now. 5706 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5707 if (NumElts == 2) return true; 5708 if (NumElts == 4) { 5709 return (isMOVLMask(&BVOps[0], 4) || 5710 isCommutedMOVL(&BVOps[0], 4, true) || 5711 isSHUFPMask(&BVOps[0], 4) || 5712 isCommutedSHUFP(&BVOps[0], 4)); 5713 } 5714 return false; 5715} 5716 5717//===----------------------------------------------------------------------===// 5718// X86 Scheduler Hooks 5719//===----------------------------------------------------------------------===// 5720 5721MachineBasicBlock * 5722X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5723 MachineBasicBlock *BB) { 5724 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5725 switch (MI->getOpcode()) { 5726 default: assert(false && "Unexpected instr type to insert"); 5727 case X86::CMOV_FR32: 5728 case X86::CMOV_FR64: 5729 case X86::CMOV_V4F32: 5730 case X86::CMOV_V2F64: 5731 case X86::CMOV_V2I64: { 5732 // To "insert" a SELECT_CC instruction, we actually have to insert the 5733 // diamond control-flow pattern. The incoming instruction knows the 5734 // destination vreg to set, the condition code register to branch on, the 5735 // true/false values to select between, and a branch opcode to use. 5736 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5737 ilist<MachineBasicBlock>::iterator It = BB; 5738 ++It; 5739 5740 // thisMBB: 5741 // ... 5742 // TrueVal = ... 5743 // cmpTY ccX, r1, r2 5744 // bCC copy1MBB 5745 // fallthrough --> copy0MBB 5746 MachineBasicBlock *thisMBB = BB; 5747 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5748 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5749 unsigned Opc = 5750 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5751 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5752 MachineFunction *F = BB->getParent(); 5753 F->getBasicBlockList().insert(It, copy0MBB); 5754 F->getBasicBlockList().insert(It, sinkMBB); 5755 // Update machine-CFG edges by first adding all successors of the current 5756 // block to the new block which will contain the Phi node for the select. 5757 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5758 e = BB->succ_end(); i != e; ++i) 5759 sinkMBB->addSuccessor(*i); 5760 // Next, remove all successors of the current block, and add the true 5761 // and fallthrough blocks as its successors. 5762 while(!BB->succ_empty()) 5763 BB->removeSuccessor(BB->succ_begin()); 5764 BB->addSuccessor(copy0MBB); 5765 BB->addSuccessor(sinkMBB); 5766 5767 // copy0MBB: 5768 // %FalseValue = ... 5769 // # fallthrough to sinkMBB 5770 BB = copy0MBB; 5771 5772 // Update machine-CFG edges 5773 BB->addSuccessor(sinkMBB); 5774 5775 // sinkMBB: 5776 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5777 // ... 
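    // The PHI built below selects MI's operand 1 when control arrived through
    // copy0MBB and MI's operand 2 when it flowed directly from thisMBB.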
5778 BB = sinkMBB; 5779 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5780 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5781 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5782 5783 delete MI; // The pseudo instruction is gone now. 5784 return BB; 5785 } 5786 5787 case X86::FP32_TO_INT16_IN_MEM: 5788 case X86::FP32_TO_INT32_IN_MEM: 5789 case X86::FP32_TO_INT64_IN_MEM: 5790 case X86::FP64_TO_INT16_IN_MEM: 5791 case X86::FP64_TO_INT32_IN_MEM: 5792 case X86::FP64_TO_INT64_IN_MEM: 5793 case X86::FP80_TO_INT16_IN_MEM: 5794 case X86::FP80_TO_INT32_IN_MEM: 5795 case X86::FP80_TO_INT64_IN_MEM: { 5796 // Change the floating point control register to use "round towards zero" 5797 // mode when truncating to an integer value. 5798 MachineFunction *F = BB->getParent(); 5799 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5800 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5801 5802 // Load the old value of the high byte of the control word... 5803 unsigned OldCW = 5804 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5805 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5806 5807 // Set the high part to be round to zero... 5808 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5809 .addImm(0xC7F); 5810 5811 // Reload the modified control word now... 5812 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5813 5814 // Restore the memory image of control word to original value 5815 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5816 .addReg(OldCW); 5817 5818 // Get the X86 opcode to use. 5819 unsigned Opc; 5820 switch (MI->getOpcode()) { 5821 default: assert(0 && "illegal opcode!"); 5822 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5823 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5824 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5825 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5826 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5827 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5828 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5829 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5830 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5831 } 5832 5833 X86AddressMode AM; 5834 MachineOperand &Op = MI->getOperand(0); 5835 if (Op.isRegister()) { 5836 AM.BaseType = X86AddressMode::RegBase; 5837 AM.Base.Reg = Op.getReg(); 5838 } else { 5839 AM.BaseType = X86AddressMode::FrameIndexBase; 5840 AM.Base.FrameIndex = Op.getIndex(); 5841 } 5842 Op = MI->getOperand(1); 5843 if (Op.isImmediate()) 5844 AM.Scale = Op.getImm(); 5845 Op = MI->getOperand(2); 5846 if (Op.isImmediate()) 5847 AM.IndexReg = Op.getImm(); 5848 Op = MI->getOperand(3); 5849 if (Op.isGlobalAddress()) { 5850 AM.GV = Op.getGlobal(); 5851 } else { 5852 AM.Disp = Op.getImm(); 5853 } 5854 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5855 .addReg(MI->getOperand(4).getReg()); 5856 5857 // Reload the original control word now. 5858 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5859 5860 delete MI; // The pseudo instruction is gone now. 
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0)
      : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(), DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
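/// Both loads must hang off the same chain and address the same underlying
/// object; for example, with Size == 4 and Dist == 2 the candidate load must
/// read from Base's address plus 8 bytes.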
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset = 0;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  // DAG combine handles the stack object case.
  return false;
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT)/8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  LoadSDNode *LD = cast<LoadSDNode>(Base);
  if (isAlign16) {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  } else {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile(),
                       LD->getAlignment());
  }
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE[12] support, try to form min/max nodes.
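  // The checks below look through the SETCC feeding the select and compare
  // its operands against the select's results, e.g.:
  //   (select (setolt X, Y), X, Y)  ->  (X86ISD::FMIN X, Y)
  //   (select (setge  X, Y), X, Y)  ->  (X86ISD::FMAX X, Y)
  // Orderings whose NaN/signed-zero behaviour need not match the instruction
  // eventually selected for FMIN/FMAX are only folded under UnsafeFPMath.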
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE:  // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT:  // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT:  // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
  if (MVT::isVector(St->getValue().getValueType()) &&
      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().Val;
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDOperand, 8> Ops;
    SDNode* ChainVal = St->getChain().Val;
    // Must be a store of a load.  We currently handle two cases:  the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).Val == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
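      // Going through an integer register keeps the value out of the MMX/x87
      // register file entirely, so no emms is required for this copy.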
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewLd);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(), St->getSrcValueOffset()+4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
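/// Since +0.0 is the all-zeroes bit pattern, AND-ing any value with it yields
/// +0.0 again, which is why the combine below folds to the zero operand.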
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}

SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// lowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
                                         std::string& s) const {
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      s = "Y";
    else if (Subtarget->hasSSE1())
      s = "x";
    else
      s = "f";
  } else
    return TargetLowering::lowerXConstraint(ConstraintVT, s);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand>&Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
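    // A symbolic address (optionally plus a constant displacement) is also
    // acceptable for 'i', provided no extra load is needed to materialize it;
    // that case is matched below.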
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Also check the commuted (C+GA) form.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax}" with an i32 operand to {eax};
  // we don't want it to turn into the pair {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-, 32-, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
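  // For example, "{ax}" used with an i32 operand is rewritten below to EAX in
  // GR32, and with an i64 operand to RAX in GR64.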
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}