X86ISelLowering.cpp revision 27b7db549e4c5bff4579d209304de5628513edeb
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParamAttrsList.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8,  X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
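
  // A note for readers (standard TargetLowering semantics of this era, not a
  // comment from the original file): in the setOperationAction calls below,
  // "Legal" nodes are selected as-is, "Promote" widens the type first,
  // "Expand" rewrites the node in terms of other nodes (or a libcall), and
  // "Custom" defers to this target's LowerOperation hook.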

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8 , Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }
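
  // For example, an f64 -> i16 FP_TO_UINT is promoted to an f64 -> i32
  // FP_TO_SINT: every i16 value lands in the non-negative half of the i32
  // range, so the signed conversion, truncated afterwards, gives the
  // unsigned result.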

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);
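
  // To illustrate the CSE opportunity mentioned above, IR such as
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // legalizes to two ISD::SDIVREM nodes with identical operands; CSE merges
  // them, so one IDIV yields the quotient in EAX and the remainder in EDX.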

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS      , MVT::i64  , Custom);
  }
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  if (!Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH       , MVT::Other, Expand);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER     , MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LCS       , MVT::i8 , Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS       , MVT::i64, Custom);
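
  // ATOMIC_LCS is the load/compare-and-swap node; its Custom lowering maps
  // it onto the lock cmpxchg family of instructions.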

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
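
  // Note on the addLegalFPImmediate calls above: they record which FP
  // constants can be materialized without a constant-pool load, e.g. +1.0
  // via FLD1 and -1.0 via FLD1 followed by FCHS, as the per-line comments
  // indicate.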

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN , MVT::f80 , Expand);
    setOperationAction(ISD::FCOS , MVT::f80 , Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
  setOperationAction(ISD::FPOW , MVT::f80 , Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }
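
  // The Promote entries above funnel the narrower MMX types into v1i64: an
  // AND of two v8i8 values, for instance, is bitconverted to v1i64, operated
  // on there, and bitconverted back, so all four types share one 64-bit PAND.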

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,  MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,   MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,   MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,   MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,   MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,   MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,   MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,   MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,   MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
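
    // v2i64 element insert/extract is only custom-lowered on x86-64, where
    // an i64 element fits in a single GPR (presumably the motivation for the
    // is64Bit() guard above).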

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
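
  // In effect, a small memset/memcpy/memmove intrinsic is expanded inline
  // into at most 16 stores; anything larger falls back to the library call.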
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. The last operand is a flag, so it is
    // not copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP value in an SSE register, it has to be moved from
    // an XMM register onto the fp-stack. Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized
    // away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}
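
// A note on LowerRET's ST0 path above: the result is widened to f80 before
// FP_SET_RESULT, so an f32/f64 value living in an SSE register reaches the
// x87 stack via a store/reload round trip, unless that FP_EXTEND can be
// optimized away.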


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);

    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_ROUND to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain  = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain  = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}
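
// LowerCallResultToTwo64BitRegs and LowerCallResultToTwoX87Regs back the
// (currently disabled, see ClassifyX86_64SRetCallReturn below) x86-64
// optimization that returns small SRet structs in register pairs and stores
// them into the hidden StructRet slot, instead of passing through memory.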

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT2, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is the standard for many Windows API
//  routines and the like. It differs from the C calling convention only a
//  little: the callee should clean up the stack, not the caller. Symbols
//  should also be decorated in some fancy way :) It doesn't support any
//  vector arguments.
//  For info on the fast calling convention (tail call), see its
//  implementation in LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses
/// struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}
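
// In practice: 32-bit stdcall/fastcall functions always clear their own
// arguments, and fastcc joins them only when PerformTailCallOpt is set,
// since (per the comment above) tail calls require callee-pop semantics.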

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call. Currently the implementation of this call is very conservative and
/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
/// virtual registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
                                                    MachineFrameInfo *MFI) {
  RegisterSDNode *OpReg = NULL;
  FrameIndexSDNode *FrameIdxNode = NULL;
  int FrameIdx = 0;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       (FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op.getOperand(1))) &&
       (MFI->isFixedObjectIndex((FrameIdx = FrameIdxNode->getIndex()))) &&
       (MFI->getObjectOffset(FrameIdx) >= 0)))
    return true;
  return false;
}

/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// to be loaded in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
    getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
    Subtarget->isPICStyleGOT();
}

/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
    getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
    Subtarget->isPICStyleGOT();
}
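
// Taken together: a normal 32-bit GOT-style PIC call must materialize the
// GOT pointer (conventionally in EBX for calls through the PLT) beforehand,
// while a PIC tail call instead loads the callee's address into a register.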

/// CopyTailCallClobberedArgumentsToVRegs - Create virtual registers for all
/// arguments to force loading and guarantee that arguments sourcing from
/// incoming parameters do not overwrite each other.
static SDOperand
CopyTailCallClobberedArgumentsToVRegs(SDOperand Chain,
      SmallVector<std::pair<unsigned, SDOperand>, 8> &TailCallClobberedVRegs,
                                      SelectionDAG &DAG,
                                      MachineFunction &MF,
                                      const TargetLowering *TL) {

  SDOperand InFlag;
  for (unsigned i = 0, e = TailCallClobberedVRegs.size(); i != e; i++) {
    SDOperand Arg = TailCallClobberedVRegs[i].second;
    unsigned Idx = TailCallClobberedVRegs[i].first;
    unsigned VReg =
      MF.getRegInfo().
        createVirtualRegister(TL->getRegClassFor(Arg.getValueType()));
    Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
    InFlag = Chain.getValue(1);
    Arg = DAG.getCopyFromReg(Chain, VReg, Arg.getValueType(), InFlag);
    TailCallClobberedVRegs[i] = std::make_pair(Idx, Arg);
    Chain = Arg.getValue(1);
    InFlag = Arg.getValue(2);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          unsigned Flags, SelectionDAG &DAG) {
  unsigned Align = 1 <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isByVal = Flags & ISD::ParamFlags::ByVal;
  bool isImmutable = !AlwaysUseMutable && !isByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis.
  // In case of tail call optimization, mark all arguments mutable, since they
  // could be overwritten by the lowering of arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}
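
// Note that for a byval argument LowerMemArgument hands back the frame index
// itself rather than a load: the aggregate stays in the argument area, and
// any copy is produced on the call side by CreateCopyOfByValArgument.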

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }
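
  // For example, an i8 signext argument arrives in the low byte of a 32-bit
  // register; the AssertSext above records that the upper 24 bits already
  // hold sign bits, letting later sign-extends of the truncated value fold.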

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }
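
  // Layout of the register save area built above: 6 GPR slots (6 * 8 bytes)
  // followed by 8 XMM slots (8 * 16 bytes); gp_offset/fp_offset in the
  // va_list index into this block, hence VarArgsFPOffset starting at 6 * 8.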
1370 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 1371 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo); 1372} 1373 1374SDOperand 1375X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, 1376 const SDOperand &StackPtr, 1377 const CCValAssign &VA, 1378 SDOperand Chain, 1379 SDOperand Arg) { 1380 unsigned LocMemOffset = VA.getLocMemOffset(); 1381 SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1382 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1383 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1384 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1385 if (Flags & ISD::ParamFlags::ByVal) { 1386 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG); 1387 } 1388 return DAG.getStore(Chain, Arg, PtrOff, 1389 PseudoSourceValue::getStack(), LocMemOffset); 1390} 1391 1392/// ClassifyX86_64SRetCallReturn - Classify how to implement a x86-64 1393/// struct return call to the specified function. X86-64 ABI specifies 1394/// some SRet calls are actually returned in registers. Since current 1395/// LLVM cannot represent multi-value calls, they are represent as 1396/// calls where the results are passed in a hidden struct provided by 1397/// the caller. This function examines the type of the struct to 1398/// determine the correct way to implement the call. 1399X86::X86_64SRet 1400X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) { 1401 // FIXME: Disabled for now. 1402 return X86::InMemory; 1403 1404 const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType()); 1405 const Type *RTy = PTy->getElementType(); 1406 unsigned Size = getTargetData()->getABITypeSize(RTy); 1407 if (Size != 16 && Size != 32) 1408 return X86::InMemory; 1409 1410 if (Size == 32) { 1411 const StructType *STy = dyn_cast<StructType>(RTy); 1412 if (!STy) return X86::InMemory; 1413 if (STy->getNumElements() == 2 && 1414 STy->getElementType(0) == Type::X86_FP80Ty && 1415 STy->getElementType(1) == Type::X86_FP80Ty) 1416 return X86::InX87; 1417 } 1418 1419 bool AllFP = true; 1420 for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end(); 1421 I != E; ++I) { 1422 const Type *STy = I->get(); 1423 if (!STy->isFPOrFPVector()) { 1424 AllFP = false; 1425 break; 1426 } 1427 } 1428 1429 if (AllFP) 1430 return X86::InSSE; 1431 return X86::InGPR64; 1432} 1433 1434void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall, 1435 CCAssignFn *Fn, 1436 CCState &CCInfo) { 1437 unsigned NumOps = (TheCall->getNumOperands() - 5) / 2; 1438 for (unsigned i = 1; i != NumOps; ++i) { 1439 MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType(); 1440 SDOperand FlagOp = TheCall->getOperand(5+2*i+1); 1441 unsigned ArgFlags =cast<ConstantSDNode>(FlagOp)->getValue(); 1442 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) { 1443 cerr << "Call operand #" << i << " has unhandled type " 1444 << MVT::getValueTypeString(ArgVT) << "\n"; 1445 abort(); 1446 } 1447 } 1448} 1449 1450SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 1451 MachineFunction &MF = DAG.getMachineFunction(); 1452 MachineFrameInfo * MFI = MF.getFrameInfo(); 1453 SDOperand Chain = Op.getOperand(0); 1454 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 1455 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1456 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 1457 && CC == CallingConv::Fast && PerformTailCallOpt; 1458 SDOperand Callee = Op.getOperand(4); 
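  // For reference, the operand layout of the ISD::CALL node as it is decoded
  // here and in LowerMemOpCallTo (an inference from the indices used in this
  // file, not a normative description):
  //   operand 0       : chain
  //   operand 1       : calling convention (constant)
  //   operand 2       : isVarArg flag (constant)
  //   operand 3       : isTailCall flag (constant)
  //   operand 4       : callee
  //   operand 5 + 2*i : value of argument i
  //   operand 6 + 2*i : parameter flags of argument i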
1459 bool Is64Bit = Subtarget->is64Bit(); 1460 bool IsStructRet = CallIsStructReturn(Op); 1461 1462 assert(!(isVarArg && CC == CallingConv::Fast) && 1463 "Var args not supported with calling convention fastcc"); 1464 1465 // Analyze operands of the call, assigning locations to each operand. 1466 SmallVector<CCValAssign, 16> ArgLocs; 1467 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1468 CCAssignFn *CCFn = CCAssignFnForNode(Op); 1469 1470 X86::X86_64SRet SRetMethod = X86::InMemory; 1471 if (Is64Bit && IsStructRet) 1472 // FIXME: We can't figure out type of the sret structure for indirect 1473 // calls. We need to copy more information from CallSite to the ISD::CALL 1474 // node. 1475 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1476 SRetMethod = 1477 ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal())); 1478 1479 // UGLY HACK! For x86-64, some 128-bit aggregates are returns in a pair of 1480 // registers. Unfortunately, llvm does not support i128 yet so we pretend it's 1481 // a sret call. 1482 if (SRetMethod != X86::InMemory) 1483 X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo); 1484 else 1485 CCInfo.AnalyzeCallOperands(Op.Val, CCFn); 1486 1487 // Get a count of how many bytes are to be pushed on the stack. 1488 unsigned NumBytes = CCInfo.getNextStackOffset(); 1489 if (CC == CallingConv::Fast) 1490 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1491 1492 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1493 // arguments and the arguments after the retaddr has been pushed are aligned. 1494 if (!Is64Bit && CC == CallingConv::X86_FastCall && 1495 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() && 1496 (NumBytes & 7) == 0) 1497 NumBytes += 4; 1498 1499 int FPDiff = 0; 1500 if (IsTailCall) { 1501 // Lower arguments at fp - stackoffset + fpdiff. 1502 unsigned NumBytesCallerPushed = 1503 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1504 FPDiff = NumBytesCallerPushed - NumBytes; 1505 1506 // Set the delta of movement of the returnaddr stackslot. 1507 // But only set if delta is greater than previous delta. 1508 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1509 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1510 } 1511 1512 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); 1513 1514 SDOperand RetAddrFrIdx; 1515 if (IsTailCall) { 1516 // Adjust the Return address stack slot. 1517 if (FPDiff) { 1518 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1519 RetAddrFrIdx = getReturnAddressFrameIndex(DAG); 1520 // Load the "old" Return address. 1521 RetAddrFrIdx = 1522 DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0); 1523 Chain = SDOperand(RetAddrFrIdx.Val, 1); 1524 } 1525 } 1526 1527 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1528 SmallVector<std::pair<unsigned, SDOperand>, 8> TailCallClobberedVRegs; 1529 SmallVector<SDOperand, 8> MemOpChains; 1530 1531 SDOperand StackPtr; 1532 1533 // Walk the register/memloc assignments, inserting copies/loads. For tail 1534 // calls, remember all arguments for later special lowering. 1535 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1536 CCValAssign &VA = ArgLocs[i]; 1537 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1538 1539 // Promote the value if needed. 
1540    switch (VA.getLocInfo()) {
1541    default: assert(0 && "Unknown loc info!");
1542    case CCValAssign::Full: break;
1543    case CCValAssign::SExt:
1544      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1545      break;
1546    case CCValAssign::ZExt:
1547      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1548      break;
1549    case CCValAssign::AExt:
1550      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1551      break;
1552    }
1553
1554    if (VA.isRegLoc()) {
1555      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1556    } else {
1557      if (!IsTailCall) {
1558        assert(VA.isMemLoc());
1559        if (StackPtr.Val == 0)
1560          StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1561
1562        MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1563                                               Arg));
1564      } else if (IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
1565        TailCallClobberedVRegs.push_back(std::make_pair(i, Arg));
1566      }
1567    }
1568  }
1569
1570  if (!MemOpChains.empty())
1571    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1572                        &MemOpChains[0], MemOpChains.size());
1573
1574  // Build a sequence of copy-to-reg nodes chained together with token chain
1575  // and flag operands which copy the outgoing args into registers.
1576  SDOperand InFlag;
1577  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1578    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1579                             InFlag);
1580    InFlag = Chain.getValue(1);
1581  }
1582
1583  // ELF / PIC requires the GOT pointer to be live in EBX before making
1584  // function calls via the PLT.
1585  if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
1586    Chain = DAG.getCopyToReg(Chain, X86::EBX,
1587                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1588                             InFlag);
1589    InFlag = Chain.getValue(1);
1590  }
1591  // If we are tail calling and generating PIC/GOT style code load the address
1592  // of the callee into ecx. The value in ecx is used as target of the tail
1593  // jump. This is done to circumvent the ebx/callee-saved problem for tail
1594  // calls on PIC/GOT architectures. Normally we would just put the address of
1595  // GOT into ebx and then call target@PLT. But for tail calls ebx would be
1596  // restored (since ebx is callee saved) before jumping to the target@PLT.
1597  if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
1598    // Note: The actual moving to ecx is done further down.
1599    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
1600    if (G && !G->getGlobal()->hasHiddenVisibility() &&
1601        !G->getGlobal()->hasProtectedVisibility())
1602      Callee = LowerGlobalAddress(Callee, DAG);
1603    else if (isa<ExternalSymbolSDNode>(Callee))
1604      Callee = LowerExternalSymbol(Callee, DAG);
1605  }
1606
1607  if (Is64Bit && isVarArg) {
1608    // From the AMD64 ABI document:
1609    // For calls that may call functions that use varargs or stdargs
1610    // (prototype-less calls or calls to functions containing ellipsis (...) in
1611    // the declaration) %al is used as hidden argument to specify the number
1612    // of SSE registers used. The contents of %al do not need to match exactly
1613    // the number of registers, but must be an upper bound on the number of SSE
1614    // registers used and is in the range 0 - 8 inclusive.
1615
1616    // Count the number of XMM registers allocated.
1617 static const unsigned XMMArgRegs[] = { 1618 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1619 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1620 }; 1621 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1622 1623 Chain = DAG.getCopyToReg(Chain, X86::AL, 1624 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1625 InFlag = Chain.getValue(1); 1626 } 1627 1628 1629 // For tail calls lower the arguments to the 'real' stack slot. 1630 if (IsTailCall) { 1631 SmallVector<SDOperand, 8> MemOpChains2; 1632 SDOperand FIN; 1633 int FI = 0; 1634 // Do not flag preceeding copytoreg stuff together with the following stuff. 1635 InFlag = SDOperand(); 1636 1637 Chain = CopyTailCallClobberedArgumentsToVRegs(Chain, TailCallClobberedVRegs, 1638 DAG, MF, this); 1639 1640 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1641 CCValAssign &VA = ArgLocs[i]; 1642 if (!VA.isRegLoc()) { 1643 assert(VA.isMemLoc()); 1644 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1645 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1646 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1647 // Create frame index. 1648 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1649 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1650 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1651 FIN = DAG.getFrameIndex(FI, MVT::i32); 1652 1653 // Find virtual register for this argument. 1654 bool Found=false; 1655 for (unsigned idx=0, e= TailCallClobberedVRegs.size(); idx < e; idx++) 1656 if (TailCallClobberedVRegs[idx].first==i) { 1657 Arg = TailCallClobberedVRegs[idx].second; 1658 Found=true; 1659 break; 1660 } 1661 assert(IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)==false || 1662 (Found==true && "No corresponding Argument was found")); 1663 1664 if (Flags & ISD::ParamFlags::ByVal) { 1665 // Copy relative to framepointer. 1666 MemOpChains2.push_back(CreateCopyOfByValArgument(Arg, FIN, Chain, 1667 Flags, DAG)); 1668 } else { 1669 // Store relative to framepointer. 1670 MemOpChains2.push_back( 1671 DAG.getStore(Chain, Arg, FIN, 1672 PseudoSourceValue::getFixedStack(), FI)); 1673 } 1674 } 1675 } 1676 1677 if (!MemOpChains2.empty()) 1678 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1679 &MemOpChains2[0], MemOpChains2.size()); 1680 1681 // Store the return address to the appropriate stack slot. 1682 if (FPDiff) { 1683 // Calculate the new stack slot for the return address. 1684 int SlotSize = Is64Bit ? 8 : 4; 1685 int NewReturnAddrFI = 1686 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1687 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1688 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1689 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, 1690 PseudoSourceValue::getFixedStack(), NewReturnAddrFI); 1691 } 1692 } 1693 1694 // If the callee is a GlobalAddress node (quite common, every direct call is) 1695 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1696 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1697 // We should use extra load for direct calls to dllimported functions in 1698 // non-JIT mode. 
1699 if ((IsTailCall || !Is64Bit || 1700 getTargetMachine().getCodeModel() != CodeModel::Large) 1701 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1702 getTargetMachine(), true)) 1703 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1704 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1705 if (IsTailCall || !Is64Bit || 1706 getTargetMachine().getCodeModel() != CodeModel::Large) 1707 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1708 } else if (IsTailCall) { 1709 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1710 1711 Chain = DAG.getCopyToReg(Chain, 1712 DAG.getRegister(Opc, getPointerTy()), 1713 Callee,InFlag); 1714 Callee = DAG.getRegister(Opc, getPointerTy()); 1715 // Add register as live out. 1716 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1717 } 1718 1719 // Returns a chain & a flag for retval copy to use. 1720 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1721 SmallVector<SDOperand, 8> Ops; 1722 1723 if (IsTailCall) { 1724 Ops.push_back(Chain); 1725 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1726 Ops.push_back(DAG.getIntPtrConstant(0)); 1727 if (InFlag.Val) 1728 Ops.push_back(InFlag); 1729 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1730 InFlag = Chain.getValue(1); 1731 1732 // Returns a chain & a flag for retval copy to use. 1733 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1734 Ops.clear(); 1735 } 1736 1737 Ops.push_back(Chain); 1738 Ops.push_back(Callee); 1739 1740 if (IsTailCall) 1741 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1742 1743 // Add an implicit use GOT pointer in EBX. 1744 if (!IsTailCall && !Is64Bit && 1745 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1746 Subtarget->isPICStyleGOT()) 1747 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1748 1749 // Add argument registers to the end of the list so that they are known live 1750 // into the call. 1751 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1752 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1753 RegsToPass[i].second.getValueType())); 1754 1755 if (InFlag.Val) 1756 Ops.push_back(InFlag); 1757 1758 if (IsTailCall) { 1759 assert(InFlag.Val && 1760 "Flag must be set. Depend on flag being set in LowerRET"); 1761 Chain = DAG.getNode(X86ISD::TAILCALL, 1762 Op.Val->getVTList(), &Ops[0], Ops.size()); 1763 1764 return SDOperand(Chain.Val, Op.ResNo); 1765 } 1766 1767 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1768 InFlag = Chain.getValue(1); 1769 1770 // Create the CALLSEQ_END node. 1771 unsigned NumBytesForCalleeToPush; 1772 if (IsCalleePop(Op)) 1773 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 1774 else if (!Is64Bit && IsStructRet) 1775 // If this is is a call to a struct-return function, the callee 1776 // pops the hidden struct pointer, so we have to push it back. 1777 // This is common for Darwin/X86, Linux & Mingw32 targets. 1778 NumBytesForCalleeToPush = 4; 1779 else 1780 NumBytesForCalleeToPush = 0; // Callee pops nothing. 1781 1782 // Returns a flag for retval copy to use. 1783 Chain = DAG.getCALLSEQ_END(Chain, 1784 DAG.getIntPtrConstant(NumBytes), 1785 DAG.getIntPtrConstant(NumBytesForCalleeToPush), 1786 InFlag); 1787 InFlag = Chain.getValue(1); 1788 1789 // Handle result values, copying them out of physregs into vregs that we 1790 // return. 
1791  switch (SRetMethod) {
1792  default:
1793    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1794  case X86::InGPR64:
1795    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1796                                                   X86::RAX, X86::RDX,
1797                                                   MVT::i64, DAG), Op.ResNo);
1798  case X86::InSSE:
1799    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1800                                                   X86::XMM0, X86::XMM1,
1801                                                   MVT::f64, DAG), Op.ResNo);
1802  case X86::InX87:
1803    return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
1804                     Op.ResNo);
1805  }
1806}
1807
1808
1809//===----------------------------------------------------------------------===//
1810//                Fast Calling Convention (tail call) implementation
1811//===----------------------------------------------------------------------===//
1812
1813//  Like the StdCall calling convention the callee cleans up the arguments,
1814//  except that ECX is reserved for storing the tail called function address.
1815//  Only 2 registers are free for argument passing (inreg). Tail call
1816//  optimization is performed provided:
1817//                * tailcallopt is enabled
1818//                * caller/callee are fastcc
1819//  On the X86_64 architecture with GOT-style position independent code only
1820//  local (within module) calls are supported at the moment.
1821//  To keep the stack aligned according to the platform ABI the function
1822//  GetAlignedArgumentStackSize ensures that the argument delta is always a
1823//  multiple of the stack alignment. (Dynamic linkers need this - darwin's
1824//  dyld for example.) If a tail called function callee has more arguments
1825//  than the caller, the caller needs to make sure that there is room to move
1826//  the RETADDR to. This is achieved by reserving an area the size of the
1827//  argument delta right after the original RETADDR, but before the saved
1828//  frame pointer or the spilled registers,
1829//  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
1830//    arg1
1831//    arg2
1832//    RETADDR
1833//    [ new RETADDR
1834//      move area ]
1835//    (possible EBP)
1836//    ESI
1837//    EDI
1838//    local1 ..
1839
1840/// GetAlignedArgumentStackSize - Round the stack size up to a value of the
1841/// form 16n + 12, e.g., for a 16 byte alignment requirement.
1842unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1843                                                        SelectionDAG& DAG) {
1844  if (PerformTailCallOpt) {
1845    MachineFunction &MF = DAG.getMachineFunction();
1846    const TargetMachine &TM = MF.getTarget();
1847    const TargetFrameInfo &TFI = *TM.getFrameInfo();
1848    unsigned StackAlignment = TFI.getStackAlignment();
1849    uint64_t AlignMask = StackAlignment - 1;
1850    int64_t Offset = StackSize;
1851    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1852    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1853      // Remainder is at most StackAlignment - SlotSize (e.g. 12); add the difference.
1854      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1855    } else {
1856      // Mask out lower bits, add StackAlignment once plus StackAlignment - SlotSize (e.g. 12) bytes.
1857      Offset = ((~AlignMask) & Offset) + StackAlignment +
1858               (StackAlignment - SlotSize);
1859    }
1860    StackSize = Offset;
1861  }
1862  return StackSize;
1863}
1864
1865/// IsEligibleForTailCallOptimization - Check to see whether the next
1866/// instruction following the call is a return. A function is eligible if
1867/// caller/callee calling conventions match, currently only fastcc supports
1868/// tail calls, and the function CALL is immediately followed by a RET.
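/// For example (an illustrative IR sketch, not taken from a testcase):
///   %res = tail call fastcc i32 @callee(i32 %x)
///   ret i32 %res
/// lowers to a CALL node immediately followed by a RET that consumes the
/// call's result, which is the pattern the implementation below matches.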
1869bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1870                                                          SDOperand Ret,
1871                                                          SelectionDAG& DAG) const {
1872  if (!PerformTailCallOpt)
1873    return false;
1874
1875  // Check whether the CALL node immediately precedes the RET node and whether
1876  // the return uses the result of the node or is a void return.
1877  unsigned NumOps = Ret.getNumOperands();
1878  if ((NumOps == 1 &&
1879       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1880        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1881      (NumOps > 1 &&
1882       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1883       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
1884    MachineFunction &MF = DAG.getMachineFunction();
1885    unsigned CallerCC = MF.getFunction()->getCallingConv();
1886    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1887    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1888      SDOperand Callee = Call.getOperand(4);
1889      // Apart from x86-64 with PIC/GOT style code, tail calls are supported here.
1890      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
1891          !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
1892        return true;
1893
1894      // Can only do local tail calls (in same module, hidden or protected) on
1895      // x86_64 PIC/GOT at the moment.
1896      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1897        return G->getGlobal()->hasHiddenVisibility()
1898            || G->getGlobal()->hasProtectedVisibility();
1899    }
1900  }
1901
1902  return false;
1903}
1904
1905//===----------------------------------------------------------------------===//
1906//                           Other Lowering Hooks
1907//===----------------------------------------------------------------------===//
1908
1909
1910SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1911  MachineFunction &MF = DAG.getMachineFunction();
1912  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1913  int ReturnAddrIndex = FuncInfo->getRAIndex();
1914
1915  if (ReturnAddrIndex == 0) {
1916    // Set up a frame object for the return address.
1917    if (Subtarget->is64Bit())
1918      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
1919    else
1920      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1921
1922    FuncInfo->setRAIndex(ReturnAddrIndex);
1923  }
1924
1925  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
1926}
1927
1928
1929
1930/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
1931/// X86 specific condition code. It returns false if it cannot do a direct
1932/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
1933/// needed.
1934static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
1935                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
1936                           SelectionDAG &DAG) {
1937  X86CC = X86::COND_INVALID;
1938  if (!isFP) {
1939    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1940      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1941        // X > -1   -> X == 0, jump !sign.
1942        RHS = DAG.getConstant(0, RHS.getValueType());
1943        X86CC = X86::COND_NS;
1944        return true;
1945      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1946        // X < 0   -> X == 0, jump on sign.
1947        X86CC = X86::COND_S;
1948        return true;
1949      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1950        // X < 1   -> X <= 0
1951        RHS = DAG.getConstant(0, RHS.getValueType());
1952        X86CC = X86::COND_LE;
1953        return true;
1954      }
1955    }
1956
1957    switch (SetCCOpcode) {
1958    default: break;
1959    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
1960    case ISD::SETGT:  X86CC = X86::COND_G;  break;
1961    case ISD::SETGE:  X86CC = X86::COND_GE; break;
1962    case ISD::SETLT:  X86CC = X86::COND_L;  break;
1963    case ISD::SETLE:  X86CC = X86::COND_LE; break;
1964    case ISD::SETNE:  X86CC = X86::COND_NE; break;
1965    case ISD::SETULT: X86CC = X86::COND_B;  break;
1966    case ISD::SETUGT: X86CC = X86::COND_A;  break;
1967    case ISD::SETULE: X86CC = X86::COND_BE; break;
1968    case ISD::SETUGE: X86CC = X86::COND_AE; break;
1969    }
1970  } else {
1971    // On a floating point condition, the flags are set as follows:
1972    //  ZF  PF  CF   op
1973    //   0 | 0 | 0 | X > Y
1974    //   0 | 0 | 1 | X < Y
1975    //   1 | 0 | 0 | X == Y
1976    //   1 | 1 | 1 | unordered
1977    bool Flip = false;
1978    switch (SetCCOpcode) {
1979    default: break;
1980    case ISD::SETUEQ:
1981    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
1982    case ISD::SETOLT: Flip = true; // Fallthrough
1983    case ISD::SETOGT:
1984    case ISD::SETGT:  X86CC = X86::COND_A;  break;
1985    case ISD::SETOLE: Flip = true; // Fallthrough
1986    case ISD::SETOGE:
1987    case ISD::SETGE:  X86CC = X86::COND_AE; break;
1988    case ISD::SETUGT: Flip = true; // Fallthrough
1989    case ISD::SETULT:
1990    case ISD::SETLT:  X86CC = X86::COND_B;  break;
1991    case ISD::SETUGE: Flip = true; // Fallthrough
1992    case ISD::SETULE:
1993    case ISD::SETLE:  X86CC = X86::COND_BE; break;
1994    case ISD::SETONE:
1995    case ISD::SETNE:  X86CC = X86::COND_NE; break;
1996    case ISD::SETUO:  X86CC = X86::COND_P;  break;
1997    case ISD::SETO:   X86CC = X86::COND_NP; break;
1998    }
1999    if (Flip)
2000      std::swap(LHS, RHS);
2001  }
2002
2003  return X86CC != X86::COND_INVALID;
2004}

2006/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
2007/// code? The current x86 ISA includes the following FP cmov instructions:
2008/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2009static bool hasFPCMov(unsigned X86CC) {
2010  switch (X86CC) {
2011  default:
2012    return false;
2013  case X86::COND_B:
2014  case X86::COND_BE:
2015  case X86::COND_E:
2016  case X86::COND_P:
2017  case X86::COND_A:
2018  case X86::COND_AE:
2019  case X86::COND_NE:
2020  case X86::COND_NP:
2021    return true;
2022  }
2023}

2025/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
2026/// true if Op is undef or if its value falls within the range [Low, Hi).
2027static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
2028  if (Op.getOpcode() == ISD::UNDEF)
2029    return true;
2030
2031  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
2032  return (Val >= Low && Val < Hi);
2033}

2035/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
2036/// true if Op is undef or if its value is equal to the specified value.
2037static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
2038  if (Op.getOpcode() == ISD::UNDEF)
2039    return true;
2040  return cast<ConstantSDNode>(Op)->getValue() == Val;
2041}

2043/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
2044/// specifies a shuffle of elements that is suitable for input to PSHUFD.
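/// For example (illustrative): for a 4-element shuffle the mask
/// <3, 1, 2, 0> qualifies because every index refers to the first vector
/// (all values are < 4), whereas <4, 1, 2, 0> does not, since element 0
/// would have to come from the second vector.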
2045bool X86::isPSHUFDMask(SDNode *N) { 2046 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2047 2048 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 2049 return false; 2050 2051 // Check if the value doesn't reference the second vector. 2052 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2053 SDOperand Arg = N->getOperand(i); 2054 if (Arg.getOpcode() == ISD::UNDEF) continue; 2055 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2056 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 2057 return false; 2058 } 2059 2060 return true; 2061} 2062 2063/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 2064/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 2065bool X86::isPSHUFHWMask(SDNode *N) { 2066 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2067 2068 if (N->getNumOperands() != 8) 2069 return false; 2070 2071 // Lower quadword copied in order. 2072 for (unsigned i = 0; i != 4; ++i) { 2073 SDOperand Arg = N->getOperand(i); 2074 if (Arg.getOpcode() == ISD::UNDEF) continue; 2075 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2076 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2077 return false; 2078 } 2079 2080 // Upper quadword shuffled. 2081 for (unsigned i = 4; i != 8; ++i) { 2082 SDOperand Arg = N->getOperand(i); 2083 if (Arg.getOpcode() == ISD::UNDEF) continue; 2084 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2085 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2086 if (Val < 4 || Val > 7) 2087 return false; 2088 } 2089 2090 return true; 2091} 2092 2093/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2094/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2095bool X86::isPSHUFLWMask(SDNode *N) { 2096 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2097 2098 if (N->getNumOperands() != 8) 2099 return false; 2100 2101 // Upper quadword copied in order. 2102 for (unsigned i = 4; i != 8; ++i) 2103 if (!isUndefOrEqual(N->getOperand(i), i)) 2104 return false; 2105 2106 // Lower quadword shuffled. 2107 for (unsigned i = 0; i != 4; ++i) 2108 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2109 return false; 2110 2111 return true; 2112} 2113 2114/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2115/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2116static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2117 if (NumElems != 2 && NumElems != 4) return false; 2118 2119 unsigned Half = NumElems / 2; 2120 for (unsigned i = 0; i < Half; ++i) 2121 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2122 return false; 2123 for (unsigned i = Half; i < NumElems; ++i) 2124 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2125 return false; 2126 2127 return true; 2128} 2129 2130bool X86::isSHUFPMask(SDNode *N) { 2131 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2132 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2133} 2134 2135/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2136/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2137/// half elements to come from vector 1 (which would equal the dest.) and 2138/// the upper half to come from vector 2. 
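/// For example (illustrative): with 4 elements the mask <4, 5, 0, 1> is
/// commuted - the lower half indices (4, 5) select vector 2 and the upper
/// half indices (0, 1) select vector 1, the mirror image of what
/// ::isSHUFPMask accepts.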
2139static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2140 if (NumOps != 2 && NumOps != 4) return false; 2141 2142 unsigned Half = NumOps / 2; 2143 for (unsigned i = 0; i < Half; ++i) 2144 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2145 return false; 2146 for (unsigned i = Half; i < NumOps; ++i) 2147 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2148 return false; 2149 return true; 2150} 2151 2152static bool isCommutedSHUFP(SDNode *N) { 2153 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2154 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2155} 2156 2157/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2158/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2159bool X86::isMOVHLPSMask(SDNode *N) { 2160 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2161 2162 if (N->getNumOperands() != 4) 2163 return false; 2164 2165 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2166 return isUndefOrEqual(N->getOperand(0), 6) && 2167 isUndefOrEqual(N->getOperand(1), 7) && 2168 isUndefOrEqual(N->getOperand(2), 2) && 2169 isUndefOrEqual(N->getOperand(3), 3); 2170} 2171 2172/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2173/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2174/// <2, 3, 2, 3> 2175bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2176 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2177 2178 if (N->getNumOperands() != 4) 2179 return false; 2180 2181 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2182 return isUndefOrEqual(N->getOperand(0), 2) && 2183 isUndefOrEqual(N->getOperand(1), 3) && 2184 isUndefOrEqual(N->getOperand(2), 2) && 2185 isUndefOrEqual(N->getOperand(3), 3); 2186} 2187 2188/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2189/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2190bool X86::isMOVLPMask(SDNode *N) { 2191 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2192 2193 unsigned NumElems = N->getNumOperands(); 2194 if (NumElems != 2 && NumElems != 4) 2195 return false; 2196 2197 for (unsigned i = 0; i < NumElems/2; ++i) 2198 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2199 return false; 2200 2201 for (unsigned i = NumElems/2; i < NumElems; ++i) 2202 if (!isUndefOrEqual(N->getOperand(i), i)) 2203 return false; 2204 2205 return true; 2206} 2207 2208/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2209/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2210/// and MOVLHPS. 2211bool X86::isMOVHPMask(SDNode *N) { 2212 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2213 2214 unsigned NumElems = N->getNumOperands(); 2215 if (NumElems != 2 && NumElems != 4) 2216 return false; 2217 2218 for (unsigned i = 0; i < NumElems/2; ++i) 2219 if (!isUndefOrEqual(N->getOperand(i), i)) 2220 return false; 2221 2222 for (unsigned i = 0; i < NumElems/2; ++i) { 2223 SDOperand Arg = N->getOperand(i + NumElems/2); 2224 if (!isUndefOrEqual(Arg, i + NumElems)) 2225 return false; 2226 } 2227 2228 return true; 2229} 2230 2231/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2232/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
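/// For example (illustrative): the canonical 4-element unpcklps mask is
/// <0, 4, 1, 5> - the low halves of the two vectors interleaved, with the
/// even positions taken from vector 1 (index j) and the odd positions from
/// vector 2 (index j + NumElts).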
2233bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2234 bool V2IsSplat = false) { 2235 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2236 return false; 2237 2238 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2239 SDOperand BitI = Elts[i]; 2240 SDOperand BitI1 = Elts[i+1]; 2241 if (!isUndefOrEqual(BitI, j)) 2242 return false; 2243 if (V2IsSplat) { 2244 if (isUndefOrEqual(BitI1, NumElts)) 2245 return false; 2246 } else { 2247 if (!isUndefOrEqual(BitI1, j + NumElts)) 2248 return false; 2249 } 2250 } 2251 2252 return true; 2253} 2254 2255bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2256 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2257 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2258} 2259 2260/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2261/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2262bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2263 bool V2IsSplat = false) { 2264 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2265 return false; 2266 2267 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2268 SDOperand BitI = Elts[i]; 2269 SDOperand BitI1 = Elts[i+1]; 2270 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2271 return false; 2272 if (V2IsSplat) { 2273 if (isUndefOrEqual(BitI1, NumElts)) 2274 return false; 2275 } else { 2276 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2277 return false; 2278 } 2279 } 2280 2281 return true; 2282} 2283 2284bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2285 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2286 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2287} 2288 2289/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2290/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2291/// <0, 0, 1, 1> 2292bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2293 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2294 2295 unsigned NumElems = N->getNumOperands(); 2296 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2297 return false; 2298 2299 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2300 SDOperand BitI = N->getOperand(i); 2301 SDOperand BitI1 = N->getOperand(i+1); 2302 2303 if (!isUndefOrEqual(BitI, j)) 2304 return false; 2305 if (!isUndefOrEqual(BitI1, j)) 2306 return false; 2307 } 2308 2309 return true; 2310} 2311 2312/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2313/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2314/// <2, 2, 3, 3> 2315bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2316 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2317 2318 unsigned NumElems = N->getNumOperands(); 2319 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2320 return false; 2321 2322 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2323 SDOperand BitI = N->getOperand(i); 2324 SDOperand BitI1 = N->getOperand(i + 1); 2325 2326 if (!isUndefOrEqual(BitI, j)) 2327 return false; 2328 if (!isUndefOrEqual(BitI1, j)) 2329 return false; 2330 } 2331 2332 return true; 2333} 2334 2335/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2336/// specifies a shuffle of elements that is suitable for input to MOVSS, 2337/// MOVSD, and MOVD, i.e. setting the lowest element. 
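/// For example (illustrative): the 4-element mask <4, 1, 2, 3> qualifies -
/// element 0 is taken from the second vector (index NumElts) and the
/// remaining elements pass through from the first vector in order, which is
/// exactly what movss does.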
2338static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
2339  if (NumElts != 2 && NumElts != 4)
2340    return false;
2341
2342  if (!isUndefOrEqual(Elts[0], NumElts))
2343    return false;
2344
2345  for (unsigned i = 1; i < NumElts; ++i) {
2346    if (!isUndefOrEqual(Elts[i], i))
2347      return false;
2348  }
2349
2350  return true;
2351}
2352
2353bool X86::isMOVLMask(SDNode *N) {
2354  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2355  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
2356}
2357
2358/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
2359/// of what x86 movss wants: element 0 comes from vector 1 and the remaining
2360/// elements come from vector 2 in order, so commuting the operands gives MOVL.
2361static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
2362                           bool V2IsSplat = false,
2363                           bool V2IsUndef = false) {
2364  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
2365    return false;
2366
2367  if (!isUndefOrEqual(Ops[0], 0))
2368    return false;
2369
2370  for (unsigned i = 1; i < NumOps; ++i) {
2371    SDOperand Arg = Ops[i];
2372    if (!(isUndefOrEqual(Arg, i+NumOps) ||
2373          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
2374          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
2375      return false;
2376  }
2377
2378  return true;
2379}
2380
2381static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2382                           bool V2IsUndef = false) {
2383  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2384  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
2385                        V2IsSplat, V2IsUndef);
2386}
2387
2388/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2389/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2390bool X86::isMOVSHDUPMask(SDNode *N) {
2391  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2392
2393  if (N->getNumOperands() != 4)
2394    return false;
2395
2396  // Expect 1, 1, 3, 3
2397  for (unsigned i = 0; i < 2; ++i) {
2398    SDOperand Arg = N->getOperand(i);
2399    if (Arg.getOpcode() == ISD::UNDEF) continue;
2400    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2401    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2402    if (Val != 1) return false;
2403  }
2404
2405  bool HasHi = false;
2406  for (unsigned i = 2; i < 4; ++i) {
2407    SDOperand Arg = N->getOperand(i);
2408    if (Arg.getOpcode() == ISD::UNDEF) continue;
2409    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2410    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2411    if (Val != 3) return false;
2412    HasHi = true;
2413  }
2414
2415  // Don't use movshdup if it can be done with a shufps.
2416  return HasHi;
2417}
2418
2419/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2420/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
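/// For example (illustrative): movsldup duplicates the even elements, so
/// the only 4-element pattern accepted below is <0, 0, 2, 2> (with undefs
/// allowed in any position).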
2421bool X86::isMOVSLDUPMask(SDNode *N) {
2422  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2423
2424  if (N->getNumOperands() != 4)
2425    return false;
2426
2427  // Expect 0, 0, 2, 2
2428  for (unsigned i = 0; i < 2; ++i) {
2429    SDOperand Arg = N->getOperand(i);
2430    if (Arg.getOpcode() == ISD::UNDEF) continue;
2431    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2432    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2433    if (Val != 0) return false;
2434  }
2435
2436  bool HasHi = false;
2437  for (unsigned i = 2; i < 4; ++i) {
2438    SDOperand Arg = N->getOperand(i);
2439    if (Arg.getOpcode() == ISD::UNDEF) continue;
2440    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2441    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2442    if (Val != 2) return false;
2443    HasHi = true;
2444  }
2445
2446  // Don't use movsldup if it can be done with a shufps.
2447  return HasHi;
2448}
2449
2450/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2451/// specifies an identity operation on the LHS or RHS.
2452static bool isIdentityMask(SDNode *N, bool RHS = false) {
2453  unsigned NumElems = N->getNumOperands();
2454  for (unsigned i = 0; i < NumElems; ++i)
2455    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2456      return false;
2457  return true;
2458}
2459
2460/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2461/// a splat of a single element.
2462static bool isSplatMask(SDNode *N) {
2463  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2464
2465  // This is a splat operation if each element of the permute is the same, and
2466  // if the value doesn't reference the second vector.
2467  unsigned NumElems = N->getNumOperands();
2468  SDOperand ElementBase;
2469  unsigned i = 0;
2470  for (; i != NumElems; ++i) {
2471    SDOperand Elt = N->getOperand(i);
2472    if (isa<ConstantSDNode>(Elt)) {
2473      ElementBase = Elt;
2474      break;
2475    }
2476  }
2477
2478  if (!ElementBase.Val)
2479    return false;
2480
2481  for (; i != NumElems; ++i) {
2482    SDOperand Arg = N->getOperand(i);
2483    if (Arg.getOpcode() == ISD::UNDEF) continue;
2484    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2485    if (Arg != ElementBase) return false;
2486  }
2487
2488  // Make sure it is a splat of the first vector operand.
2489  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2490}
2491
2492/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2493/// a splat of a single element and it's a 2 or 4 element mask.
2494bool X86::isSplatMask(SDNode *N) {
2495  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2496
2497  // We can only splat 64-bit and 32-bit quantities with a single instruction.
2498  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2499    return false;
2500  return ::isSplatMask(N);
2501}
2502
2503/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2504/// specifies a splat of element zero.
2505bool X86::isSplatLoMask(SDNode *N) {
2506  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2507
2508  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2509    if (!isUndefOrEqual(N->getOperand(i), 0))
2510      return false;
2511  return true;
2512}
2513
2514/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2515/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2516/// instructions.
2517unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2518  unsigned NumOperands = N->getNumOperands();
2519  unsigned Shift = (NumOperands == 4) ?
2 : 1;
2520  unsigned Mask = 0;
2521  for (unsigned i = 0; i < NumOperands; ++i) {
2522    unsigned Val = 0;
2523    SDOperand Arg = N->getOperand(NumOperands-i-1);
2524    if (Arg.getOpcode() != ISD::UNDEF)
2525      Val = cast<ConstantSDNode>(Arg)->getValue();
2526    if (Val >= NumOperands) Val -= NumOperands;
2527    Mask |= Val;
2528    if (i != NumOperands - 1)
2529      Mask <<= Shift;
2530  }
2531
2532  return Mask;
2533}
2534
2535/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2536/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2537/// instructions.
2538unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2539  unsigned Mask = 0;
2540  // 8 nodes, but we only care about the last 4.
2541  for (unsigned i = 7; i >= 4; --i) {
2542    unsigned Val = 0;
2543    SDOperand Arg = N->getOperand(i);
2544    if (Arg.getOpcode() != ISD::UNDEF)
2545      Val = cast<ConstantSDNode>(Arg)->getValue();
2546    Mask |= (Val - 4);
2547    if (i != 4)
2548      Mask <<= 2;
2549  }
2550
2551  return Mask;
2552}
2553
2554/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2555/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2556/// instructions.
2557unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2558  unsigned Mask = 0;
2559  // 8 nodes, but we only care about the first 4.
2560  for (int i = 3; i >= 0; --i) {
2561    unsigned Val = 0;
2562    SDOperand Arg = N->getOperand(i);
2563    if (Arg.getOpcode() != ISD::UNDEF)
2564      Val = cast<ConstantSDNode>(Arg)->getValue();
2565    Mask |= Val;
2566    if (i != 0)
2567      Mask <<= 2;
2568  }
2569
2570  return Mask;
2571}
2572
2573/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2574/// specifies an 8 element shuffle that can be broken into a pair of
2575/// PSHUFHW and PSHUFLW.
2576static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2577  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2578
2579  if (N->getNumOperands() != 8)
2580    return false;
2581
2582  // Lower quadword shuffled.
2583  for (unsigned i = 0; i != 4; ++i) {
2584    SDOperand Arg = N->getOperand(i);
2585    if (Arg.getOpcode() == ISD::UNDEF) continue;
2586    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2587    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2588    if (Val >= 4)
2589      return false;
2590  }
2591
2592  // Upper quadword shuffled.
2593  for (unsigned i = 4; i != 8; ++i) {
2594    SDOperand Arg = N->getOperand(i);
2595    if (Arg.getOpcode() == ISD::UNDEF) continue;
2596    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2597    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2598    if (Val < 4 || Val > 7)
2599      return false;
2600  }
2601
2602  return true;
2603}
2604
2605/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the
2606/// values in the permute mask.
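/// For example (illustrative): commuting
///   vector_shuffle V1, V2, <0, 5, 2, 7>
/// yields
///   vector_shuffle V2, V1, <4, 1, 6, 3>
/// since every index below NumElems moves up by NumElems and vice versa.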
2607static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2608 SDOperand &V2, SDOperand &Mask, 2609 SelectionDAG &DAG) { 2610 MVT::ValueType VT = Op.getValueType(); 2611 MVT::ValueType MaskVT = Mask.getValueType(); 2612 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2613 unsigned NumElems = Mask.getNumOperands(); 2614 SmallVector<SDOperand, 8> MaskVec; 2615 2616 for (unsigned i = 0; i != NumElems; ++i) { 2617 SDOperand Arg = Mask.getOperand(i); 2618 if (Arg.getOpcode() == ISD::UNDEF) { 2619 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2620 continue; 2621 } 2622 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2623 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2624 if (Val < NumElems) 2625 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2626 else 2627 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2628 } 2629 2630 std::swap(V1, V2); 2631 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2632 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2633} 2634 2635/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2636/// the two vector operands have swapped position. 2637static 2638SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2639 MVT::ValueType MaskVT = Mask.getValueType(); 2640 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2641 unsigned NumElems = Mask.getNumOperands(); 2642 SmallVector<SDOperand, 8> MaskVec; 2643 for (unsigned i = 0; i != NumElems; ++i) { 2644 SDOperand Arg = Mask.getOperand(i); 2645 if (Arg.getOpcode() == ISD::UNDEF) { 2646 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2647 continue; 2648 } 2649 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2650 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2651 if (Val < NumElems) 2652 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2653 else 2654 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2655 } 2656 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2657} 2658 2659 2660/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2661/// match movhlps. The lower half elements should come from upper half of 2662/// V1 (and in order), and the upper half elements should come from the upper 2663/// half of V2 (and in order). 2664static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2665 unsigned NumElems = Mask->getNumOperands(); 2666 if (NumElems != 4) 2667 return false; 2668 for (unsigned i = 0, e = 2; i != e; ++i) 2669 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2670 return false; 2671 for (unsigned i = 2; i != 4; ++i) 2672 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2673 return false; 2674 return true; 2675} 2676 2677/// isScalarLoadToVector - Returns true if the node is a scalar load that 2678/// is promoted to a vector. 2679static inline bool isScalarLoadToVector(SDNode *N) { 2680 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2681 N = N->getOperand(0).Val; 2682 return ISD::isNON_EXTLoad(N); 2683 } 2684 return false; 2685} 2686 2687/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2688/// match movlp{s|d}. The lower half elements should come from lower half of 2689/// V1 (and in order), and the upper half elements should come from the upper 2690/// half of V2 (and in order). And since V1 will become the source of the 2691/// MOVLP, it must be either a vector load or a scalar load to vector. 
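/// For example (illustrative): with V1 a load and V2 not a load, the
/// 4-element mask <0, 1, 6, 7> qualifies - the lower half <0, 1> comes from
/// V1 in order and the upper half <6, 7> comes from the upper half of V2.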
2692static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 2693 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 2694 return false; 2695 // Is V2 is a vector load, don't do this transformation. We will try to use 2696 // load folding shufps op. 2697 if (ISD::isNON_EXTLoad(V2)) 2698 return false; 2699 2700 unsigned NumElems = Mask->getNumOperands(); 2701 if (NumElems != 2 && NumElems != 4) 2702 return false; 2703 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2704 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2705 return false; 2706 for (unsigned i = NumElems/2; i != NumElems; ++i) 2707 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2708 return false; 2709 return true; 2710} 2711 2712/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2713/// all the same. 2714static bool isSplatVector(SDNode *N) { 2715 if (N->getOpcode() != ISD::BUILD_VECTOR) 2716 return false; 2717 2718 SDOperand SplatValue = N->getOperand(0); 2719 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2720 if (N->getOperand(i) != SplatValue) 2721 return false; 2722 return true; 2723} 2724 2725/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2726/// to an undef. 2727static bool isUndefShuffle(SDNode *N) { 2728 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2729 return false; 2730 2731 SDOperand V1 = N->getOperand(0); 2732 SDOperand V2 = N->getOperand(1); 2733 SDOperand Mask = N->getOperand(2); 2734 unsigned NumElems = Mask.getNumOperands(); 2735 for (unsigned i = 0; i != NumElems; ++i) { 2736 SDOperand Arg = Mask.getOperand(i); 2737 if (Arg.getOpcode() != ISD::UNDEF) { 2738 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2739 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2740 return false; 2741 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2742 return false; 2743 } 2744 } 2745 return true; 2746} 2747 2748/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2749/// constant +0.0. 2750static inline bool isZeroNode(SDOperand Elt) { 2751 return ((isa<ConstantSDNode>(Elt) && 2752 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2753 (isa<ConstantFPSDNode>(Elt) && 2754 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 2755} 2756 2757/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2758/// to an zero vector. 2759static bool isZeroShuffle(SDNode *N) { 2760 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2761 return false; 2762 2763 SDOperand V1 = N->getOperand(0); 2764 SDOperand V2 = N->getOperand(1); 2765 SDOperand Mask = N->getOperand(2); 2766 unsigned NumElems = Mask.getNumOperands(); 2767 for (unsigned i = 0; i != NumElems; ++i) { 2768 SDOperand Arg = Mask.getOperand(i); 2769 if (Arg.getOpcode() == ISD::UNDEF) 2770 continue; 2771 2772 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2773 if (Idx < NumElems) { 2774 unsigned Opc = V1.Val->getOpcode(); 2775 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val)) 2776 continue; 2777 if (Opc != ISD::BUILD_VECTOR || 2778 !isZeroNode(V1.Val->getOperand(Idx))) 2779 return false; 2780 } else if (Idx >= NumElems) { 2781 unsigned Opc = V2.Val->getOpcode(); 2782 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val)) 2783 continue; 2784 if (Opc != ISD::BUILD_VECTOR || 2785 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2786 return false; 2787 } 2788 } 2789 return true; 2790} 2791 2792/// getZeroVector - Returns a vector of specified type with all zero elements. 
2793/// 2794static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2795 assert(MVT::isVector(VT) && "Expected a vector type"); 2796 2797 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2798 // type. This ensures they get CSE'd. 2799 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2800 SDOperand Vec; 2801 if (MVT::getSizeInBits(VT) == 64) // MMX 2802 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2803 else // SSE 2804 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2805 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2806} 2807 2808/// getOnesVector - Returns a vector of specified type with all bits set. 2809/// 2810static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2811 assert(MVT::isVector(VT) && "Expected a vector type"); 2812 2813 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2814 // type. This ensures they get CSE'd. 2815 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2816 SDOperand Vec; 2817 if (MVT::getSizeInBits(VT) == 64) // MMX 2818 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2819 else // SSE 2820 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2821 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2822} 2823 2824 2825/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2826/// that point to V2 points to its first element. 2827static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2828 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2829 2830 bool Changed = false; 2831 SmallVector<SDOperand, 8> MaskVec; 2832 unsigned NumElems = Mask.getNumOperands(); 2833 for (unsigned i = 0; i != NumElems; ++i) { 2834 SDOperand Arg = Mask.getOperand(i); 2835 if (Arg.getOpcode() != ISD::UNDEF) { 2836 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2837 if (Val > NumElems) { 2838 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2839 Changed = true; 2840 } 2841 } 2842 MaskVec.push_back(Arg); 2843 } 2844 2845 if (Changed) 2846 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2847 &MaskVec[0], MaskVec.size()); 2848 return Mask; 2849} 2850 2851/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2852/// operation of specified width. 2853static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2854 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2855 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2856 2857 SmallVector<SDOperand, 8> MaskVec; 2858 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2859 for (unsigned i = 1; i != NumElems; ++i) 2860 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2861 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2862} 2863 2864/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2865/// of specified width. 
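/// For NumElems == 4 (illustrative) the mask built below is <0, 4, 1, 5>,
/// matching the element order that X86::isUNPCKLMask accepts.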
2866static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
2867  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2868  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2869  SmallVector<SDOperand, 8> MaskVec;
2870  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
2871    MaskVec.push_back(DAG.getConstant(i, BaseVT));
2872    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
2873  }
2874  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2875}
2876
2877/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
2878/// of specified width.
2879static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
2880  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2881  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2882  unsigned Half = NumElems/2;
2883  SmallVector<SDOperand, 8> MaskVec;
2884  for (unsigned i = 0; i != Half; ++i) {
2885    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
2886    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
2887  }
2888  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2889}
2890
2891/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
2892///
2893static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
2894  SDOperand V1 = Op.getOperand(0);
2895  SDOperand Mask = Op.getOperand(2);
2896  MVT::ValueType VT = Op.getValueType();
2897  unsigned NumElems = Mask.getNumOperands();
2898  Mask = getUnpacklMask(NumElems, DAG);
2899  while (NumElems != 4) {
2900    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
2901    NumElems >>= 1;
2902  }
2903  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
2904
2905  Mask = getZeroVector(MVT::v4i32, DAG);
2906  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
2907                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
2908  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
2909}
2910
2911/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
2912/// vector and a zero or undef vector. This produces a shuffle where the low
2913/// element of V2 is swizzled into the zero/undef vector, landing at element
2914/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
2915static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
2916                                             unsigned NumElems, unsigned Idx,
2917                                             bool isZero, SelectionDAG &DAG) {
2918  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
2919  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2920  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
2921  SmallVector<SDOperand, 16> MaskVec;
2922  for (unsigned i = 0; i != NumElems; ++i)
2923    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
2924      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
2925    else
2926      MaskVec.push_back(DAG.getConstant(i, EVT));
2927  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2928                               &MaskVec[0], MaskVec.size());
2929  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2930}
2931
2932/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
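/// The strategy used below, in outline: walk the 16 bytes in pairs,
/// zero-extend each non-zero byte to i16, shift the odd byte of a pair left
/// by 8 and OR the two together, insert the combined i16 at position i/2 of
/// a v8i16, and finally bitcast the v8i16 back to v16i8.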
2933/// 2934static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2935 unsigned NumNonZero, unsigned NumZero, 2936 SelectionDAG &DAG, TargetLowering &TLI) { 2937 if (NumNonZero > 8) 2938 return SDOperand(); 2939 2940 SDOperand V(0, 0); 2941 bool First = true; 2942 for (unsigned i = 0; i < 16; ++i) { 2943 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2944 if (ThisIsNonZero && First) { 2945 if (NumZero) 2946 V = getZeroVector(MVT::v8i16, DAG); 2947 else 2948 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2949 First = false; 2950 } 2951 2952 if ((i & 1) != 0) { 2953 SDOperand ThisElt(0, 0), LastElt(0, 0); 2954 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2955 if (LastIsNonZero) { 2956 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2957 } 2958 if (ThisIsNonZero) { 2959 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2960 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2961 ThisElt, DAG.getConstant(8, MVT::i8)); 2962 if (LastIsNonZero) 2963 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2964 } else 2965 ThisElt = LastElt; 2966 2967 if (ThisElt.Val) 2968 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2969 DAG.getIntPtrConstant(i/2)); 2970 } 2971 } 2972 2973 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2974} 2975 2976/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2977/// 2978static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2979 unsigned NumNonZero, unsigned NumZero, 2980 SelectionDAG &DAG, TargetLowering &TLI) { 2981 if (NumNonZero > 4) 2982 return SDOperand(); 2983 2984 SDOperand V(0, 0); 2985 bool First = true; 2986 for (unsigned i = 0; i < 8; ++i) { 2987 bool isNonZero = (NonZeros & (1 << i)) != 0; 2988 if (isNonZero) { 2989 if (First) { 2990 if (NumZero) 2991 V = getZeroVector(MVT::v8i16, DAG); 2992 else 2993 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2994 First = false; 2995 } 2996 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2997 DAG.getIntPtrConstant(i)); 2998 } 2999 } 3000 3001 return V; 3002} 3003 3004SDOperand 3005X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3006 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 3007 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 3008 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 3009 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 3010 // eliminated on x86-32 hosts. 
3011 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 3012 return Op; 3013 3014 if (ISD::isBuildVectorAllOnes(Op.Val)) 3015 return getOnesVector(Op.getValueType(), DAG); 3016 return getZeroVector(Op.getValueType(), DAG); 3017 } 3018 3019 MVT::ValueType VT = Op.getValueType(); 3020 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3021 unsigned EVTBits = MVT::getSizeInBits(EVT); 3022 3023 unsigned NumElems = Op.getNumOperands(); 3024 unsigned NumZero = 0; 3025 unsigned NumNonZero = 0; 3026 unsigned NonZeros = 0; 3027 bool HasNonImms = false; 3028 SmallSet<SDOperand, 8> Values; 3029 for (unsigned i = 0; i < NumElems; ++i) { 3030 SDOperand Elt = Op.getOperand(i); 3031 if (Elt.getOpcode() == ISD::UNDEF) 3032 continue; 3033 Values.insert(Elt); 3034 if (Elt.getOpcode() != ISD::Constant && 3035 Elt.getOpcode() != ISD::ConstantFP) 3036 HasNonImms = true; 3037 if (isZeroNode(Elt)) 3038 NumZero++; 3039 else { 3040 NonZeros |= (1 << i); 3041 NumNonZero++; 3042 } 3043 } 3044 3045 if (NumNonZero == 0) { 3046 // All undef vector. Return an UNDEF. All zero vectors were handled above. 3047 return DAG.getNode(ISD::UNDEF, VT); 3048 } 3049 3050 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3051 if (Values.size() == 1) 3052 return SDOperand(); 3053 3054 // Special case for single non-zero element. 3055 if (NumNonZero == 1 && NumElems <= 4) { 3056 unsigned Idx = CountTrailingZeros_32(NonZeros); 3057 SDOperand Item = Op.getOperand(Idx); 3058 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3059 if (Idx == 0) 3060 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 3061 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 3062 NumZero > 0, DAG); 3063 else if (!HasNonImms) // Otherwise, it's better to do a constpool load. 3064 return SDOperand(); 3065 3066 if (EVTBits == 32) { 3067 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3068 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 3069 DAG); 3070 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3071 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3072 SmallVector<SDOperand, 8> MaskVec; 3073 for (unsigned i = 0; i < NumElems; i++) 3074 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3075 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3076 &MaskVec[0], MaskVec.size()); 3077 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3078 DAG.getNode(ISD::UNDEF, VT), Mask); 3079 } 3080 } 3081 3082 // A vector full of immediates; various special cases are already 3083 // handled, so this is best done with a single constant-pool load. 3084 if (!HasNonImms) 3085 return SDOperand(); 3086 3087 // Let legalizer expand 2-wide build_vectors. 3088 if (EVTBits == 64) 3089 return SDOperand(); 3090 3091 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3092 if (EVTBits == 8 && NumElems == 16) { 3093 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3094 *this); 3095 if (V.Val) return V; 3096 } 3097 3098 if (EVTBits == 16 && NumElems == 8) { 3099 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3100 *this); 3101 if (V.Val) return V; 3102 } 3103 3104 // If element VT is == 32 bits, turn it into a number of shuffles. 
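  // For example, for <a, 0, b, 0>: V[0] becomes <a, 0, 0, 0> and V[1] becomes
  // <b, 0, 0, 0> via movl against the zero vector, and the final shuffle mask
  // <0, 1, 4, 5> interleaves the two 64-bit halves to rebuild <a, 0, b, 0>.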
3105 SmallVector<SDOperand, 8> V; 3106 V.resize(NumElems); 3107 if (NumElems == 4 && NumZero > 0) { 3108 for (unsigned i = 0; i < 4; ++i) { 3109 bool isZero = !(NonZeros & (1 << i)); 3110 if (isZero) 3111 V[i] = getZeroVector(VT, DAG); 3112 else 3113 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3114 } 3115 3116 for (unsigned i = 0; i < 2; ++i) { 3117 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3118 default: break; 3119 case 0: 3120 V[i] = V[i*2]; // Must be a zero vector. 3121 break; 3122 case 1: 3123 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3124 getMOVLMask(NumElems, DAG)); 3125 break; 3126 case 2: 3127 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3128 getMOVLMask(NumElems, DAG)); 3129 break; 3130 case 3: 3131 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3132 getUnpacklMask(NumElems, DAG)); 3133 break; 3134 } 3135 } 3136 3137 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3138 // clears the upper bits. 3139 // FIXME: we can do the same for v4f32 case when we know both parts of 3140 // the lower half come from scalar_to_vector (loadf32). We should do 3141 // that in post legalizer dag combiner with target specific hooks. 3142 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3143 return V[0]; 3144 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3145 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3146 SmallVector<SDOperand, 8> MaskVec; 3147 bool Reverse = (NonZeros & 0x3) == 2; 3148 for (unsigned i = 0; i < 2; ++i) 3149 if (Reverse) 3150 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3151 else 3152 MaskVec.push_back(DAG.getConstant(i, EVT)); 3153 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3154 for (unsigned i = 0; i < 2; ++i) 3155 if (Reverse) 3156 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3157 else 3158 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3159 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3160 &MaskVec[0], MaskVec.size()); 3161 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3162 } 3163 3164 if (Values.size() > 2) { 3165 // Expand into a number of unpckl*. 3166 // e.g. for v4f32 3167 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3168 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3169 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3170 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3171 for (unsigned i = 0; i < NumElems; ++i) 3172 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3173 NumElems >>= 1; 3174 while (NumElems != 0) { 3175 for (unsigned i = 0; i < NumElems; ++i) 3176 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3177 UnpckMask); 3178 NumElems >>= 1; 3179 } 3180 return V[0]; 3181 } 3182 3183 return SDOperand(); 3184} 3185 3186static 3187SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3188 SDOperand PermMask, SelectionDAG &DAG, 3189 TargetLowering &TLI) { 3190 SDOperand NewV; 3191 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3192 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3193 MVT::ValueType PtrVT = TLI.getPointerTy(); 3194 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3195 PermMask.Val->op_end()); 3196 3197 // First record which half of which vector the low elements come from. 
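  // Mask indices 0-7 select V1 and 8-15 select V2, so index/4 yields one of
  // four quads: V1's low or high half, or V2's low or high half.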
3198 SmallVector<unsigned, 4> LowQuad(4); 3199 for (unsigned i = 0; i < 4; ++i) { 3200 SDOperand Elt = MaskElts[i]; 3201 if (Elt.getOpcode() == ISD::UNDEF) 3202 continue; 3203 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3204 int QuadIdx = EltIdx / 4; 3205 ++LowQuad[QuadIdx]; 3206 } 3207 int BestLowQuad = -1; 3208 unsigned MaxQuad = 1; 3209 for (unsigned i = 0; i < 4; ++i) { 3210 if (LowQuad[i] > MaxQuad) { 3211 BestLowQuad = i; 3212 MaxQuad = LowQuad[i]; 3213 } 3214 } 3215 3216 // Record which half of which vector the high elements come from. 3217 SmallVector<unsigned, 4> HighQuad(4); 3218 for (unsigned i = 4; i < 8; ++i) { 3219 SDOperand Elt = MaskElts[i]; 3220 if (Elt.getOpcode() == ISD::UNDEF) 3221 continue; 3222 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3223 int QuadIdx = EltIdx / 4; 3224 ++HighQuad[QuadIdx]; 3225 } 3226 int BestHighQuad = -1; 3227 MaxQuad = 1; 3228 for (unsigned i = 0; i < 4; ++i) { 3229 if (HighQuad[i] > MaxQuad) { 3230 BestHighQuad = i; 3231 MaxQuad = HighQuad[i]; 3232 } 3233 } 3234 3235 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3236 if (BestLowQuad != -1 || BestHighQuad != -1) { 3237 // First sort the 4 chunks in order using shufpd. 3238 SmallVector<SDOperand, 8> MaskVec; 3239 if (BestLowQuad != -1) 3240 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3241 else 3242 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3243 if (BestHighQuad != -1) 3244 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3245 else 3246 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3247 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3248 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3249 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3250 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3251 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3252 3253 // Now sort high and low parts separately. 3254 BitVector InOrder(8); 3255 if (BestLowQuad != -1) { 3256 // Sort lower half in order using PSHUFLW. 3257 MaskVec.clear(); 3258 bool AnyOutOrder = false; 3259 for (unsigned i = 0; i != 4; ++i) { 3260 SDOperand Elt = MaskElts[i]; 3261 if (Elt.getOpcode() == ISD::UNDEF) { 3262 MaskVec.push_back(Elt); 3263 InOrder.set(i); 3264 } else { 3265 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3266 if (EltIdx != i) 3267 AnyOutOrder = true; 3268 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3269 // If this element is in the right place after this shuffle, then 3270 // remember it. 3271 if ((int)(EltIdx / 4) == BestLowQuad) 3272 InOrder.set(i); 3273 } 3274 } 3275 if (AnyOutOrder) { 3276 for (unsigned i = 4; i != 8; ++i) 3277 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3278 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3279 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3280 } 3281 } 3282 3283 if (BestHighQuad != -1) { 3284 // Sort high half in order using PSHUFHW if possible. 
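      // PSHUFHW leaves lanes 0-3 untouched, so the mask built below keeps an
      // identity prefix and only permutes lanes 4-7 within the chosen quad.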
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
      bool AnyOutOrder = false;
      for (unsigned i = 4; i != 8; ++i) {
        SDOperand Elt = MaskElts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(Elt);
          InOrder.set(i);
        } else {
          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
          if (EltIdx != i)
            AnyOutOrder = true;
          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
          // If this element is in the right place after this shuffle, then
          // remember it.
          if ((int)(EltIdx / 4) == BestHighQuad)
            InOrder.set(i);
        }
      }
      if (AnyOutOrder) {
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
      }
    }

    // The other elements are put in the right place using pextrw and pinsrw.
    for (unsigned i = 0; i != 8; ++i) {
      if (InOrder[i])
        continue;
      SDOperand Elt = MaskElts[i];
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx == i)
        continue;
      SDOperand ExtOp = (EltIdx < 8)
        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                      DAG.getConstant(EltIdx, PtrVT))
        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                      DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }

  // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use
  // as few as possible.
  // First, let's find out how many elements are already in the right order.
  unsigned V1InOrder = 0;
  unsigned V1FromV1 = 0;
  unsigned V2InOrder = 0;
  unsigned V2FromV2 = 0;
  SmallVector<SDOperand, 8> V1Elts;
  SmallVector<SDOperand, 8> V2Elts;
  for (unsigned i = 0; i < 8; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(Elt);
      ++V1InOrder;
      ++V2InOrder;
      continue;
    }
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    if (EltIdx == i) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
      ++V1InOrder;
    } else if (EltIdx == i+8) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i, MaskEVT));
      ++V2InOrder;
    } else if (EltIdx < 8) {
      V1Elts.push_back(Elt);
      ++V1FromV1;
    } else {
      V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
      ++V2FromV2;
    }
  }

  if (V2InOrder > V1InOrder) {
    PermMask = CommuteVectorShuffleMask(PermMask, DAG);
    std::swap(V1, V2);
    std::swap(V1Elts, V2Elts);
    std::swap(V1FromV1, V2FromV2);
  }

  if ((V1FromV1 + V1InOrder) != 8) {
    // Some elements are from V2.
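    // First shuffle the misplaced V1 elements into position with a single
    // self-shuffle, then transfer each needed V2 element with pextrw/pinsrw.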
    if (V1FromV1) {
      // If there are elements that are from V1 but out of place,
      // then first sort them in place.
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < 8; ++i) {
        SDOperand Elt = V1Elts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
          continue;
        }
        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
        if (EltIdx >= 8)
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
        else
          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
      }
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
    }

    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx < 8)
        continue;
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                                    DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  } else {
    // All elements are from V1.
    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                                    DAG.getConstant(EltIdx, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle <>, <>, < 2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
                                   MVT::ValueType VT,
                                   SDOperand PermMask, SelectionDAG &DAG,
                                   TargetLowering &TLI) {
  unsigned NumElems = PermMask.getNumOperands();
  unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3438 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3439 MVT::ValueType NewVT = MaskVT; 3440 switch (VT) { 3441 case MVT::v4f32: NewVT = MVT::v2f64; break; 3442 case MVT::v4i32: NewVT = MVT::v2i64; break; 3443 case MVT::v8i16: NewVT = MVT::v4i32; break; 3444 case MVT::v16i8: NewVT = MVT::v4i32; break; 3445 default: assert(false && "Unexpected!"); 3446 } 3447 3448 if (NewWidth == 2) { 3449 if (MVT::isInteger(VT)) 3450 NewVT = MVT::v2i64; 3451 else 3452 NewVT = MVT::v2f64; 3453 } 3454 unsigned Scale = NumElems / NewWidth; 3455 SmallVector<SDOperand, 8> MaskVec; 3456 for (unsigned i = 0; i < NumElems; i += Scale) { 3457 unsigned StartIdx = ~0U; 3458 for (unsigned j = 0; j < Scale; ++j) { 3459 SDOperand Elt = PermMask.getOperand(i+j); 3460 if (Elt.getOpcode() == ISD::UNDEF) 3461 continue; 3462 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3463 if (StartIdx == ~0U) 3464 StartIdx = EltIdx - (EltIdx % Scale); 3465 if (EltIdx != StartIdx + j) 3466 return SDOperand(); 3467 } 3468 if (StartIdx == ~0U) 3469 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3470 else 3471 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3472 } 3473 3474 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3475 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3476 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3477 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3478 &MaskVec[0], MaskVec.size())); 3479} 3480 3481SDOperand 3482X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3483 SDOperand V1 = Op.getOperand(0); 3484 SDOperand V2 = Op.getOperand(1); 3485 SDOperand PermMask = Op.getOperand(2); 3486 MVT::ValueType VT = Op.getValueType(); 3487 unsigned NumElems = PermMask.getNumOperands(); 3488 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3489 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3490 bool V1IsSplat = false; 3491 bool V2IsSplat = false; 3492 3493 if (isUndefShuffle(Op.Val)) 3494 return DAG.getNode(ISD::UNDEF, VT); 3495 3496 if (isZeroShuffle(Op.Val)) 3497 return getZeroVector(VT, DAG); 3498 3499 if (isIdentityMask(PermMask.Val)) 3500 return V1; 3501 else if (isIdentityMask(PermMask.Val, true)) 3502 return V2; 3503 3504 if (isSplatMask(PermMask.Val)) { 3505 if (NumElems <= 4) return Op; 3506 // Promote it to a v4i32 splat. 3507 return PromoteSplat(Op, DAG); 3508 } 3509 3510 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3511 // do it! 3512 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3513 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3514 if (NewOp.Val) 3515 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3516 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3517 // FIXME: Figure out a cleaner way to do this. 3518 // Try to make use of movq to zero out the top part. 
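    // If the shuffle against a zero vector reduces to a 2-wide commuted MOVL,
    // it can be selected as movq, which implicitly zeroes the upper 64 bits.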
    if (ISD::isBuildVectorAllZeros(V2.Val)) {
      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
      if (NewOp.Val) {
        SDOperand NewV1 = NewOp.getOperand(0);
        SDOperand NewV2 = NewOp.getOperand(1);
        SDOperand NewMask = NewOp.getOperand(2);
        if (isCommutedMOVL(NewMask.Val, true, false)) {
          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
                              NewV1, NewV2, getMOVLMask(2, DAG));
          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
        }
      }
    } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
    }
  }

  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  // FIXME: Figure out a cleaner way to do this.
  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If they match, return a
    // new vector_shuffle with the corrected mask.
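    // Since V2 is a splat, every V2 lane holds the same value, so redirecting
    // any V2 index to V2's first element does not change the shuffle result.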
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
    // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
    if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
         X86::isPSHUFDMask(PermMask.Val)) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val) &&
        MVT::getSizeInBits(VT) != 64)  // Don't do this for MMX.
      return Op;
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }
  }

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
    if (NewOp.Val)
      return NewOp;
  }

  // Handle all 4 wide cases with a number of shuffles.
  if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) {
    // Don't do this for MMX.
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
    SmallVector<std::pair<int, int>, 8> Locs;
    Locs.reserve(NumElems);
    SmallVector<SDOperand, 8> Mask1(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand, 8> Mask2(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements,
    // and the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
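    // For example, for a v4 mask <2, 5, 0, 7> the first shuffle gathers
    // <V1[2], V1[0], V2[1], V2[3]> and the second reorders that result with
    // mask <0, 2, 5, 7> to produce <V1[2], V2[1], V1[0], V2[3]>.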
3673 for (unsigned i = 0; i != NumElems; ++i) { 3674 SDOperand Elt = PermMask.getOperand(i); 3675 if (Elt.getOpcode() == ISD::UNDEF) { 3676 Locs[i] = std::make_pair(-1, -1); 3677 } else { 3678 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3679 if (Val < NumElems) { 3680 Locs[i] = std::make_pair(0, NumLo); 3681 Mask1[NumLo] = Elt; 3682 NumLo++; 3683 } else { 3684 Locs[i] = std::make_pair(1, NumHi); 3685 if (2+NumHi < NumElems) 3686 Mask1[2+NumHi] = Elt; 3687 NumHi++; 3688 } 3689 } 3690 } 3691 if (NumLo <= 2 && NumHi <= 2) { 3692 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3693 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3694 &Mask1[0], Mask1.size())); 3695 for (unsigned i = 0; i != NumElems; ++i) { 3696 if (Locs[i].first == -1) 3697 continue; 3698 else { 3699 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3700 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3701 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3702 } 3703 } 3704 3705 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3706 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3707 &Mask2[0], Mask2.size())); 3708 } 3709 3710 // Break it into (shuffle shuffle_hi, shuffle_lo). 3711 Locs.clear(); 3712 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3713 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3714 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3715 unsigned MaskIdx = 0; 3716 unsigned LoIdx = 0; 3717 unsigned HiIdx = NumElems/2; 3718 for (unsigned i = 0; i != NumElems; ++i) { 3719 if (i == NumElems/2) { 3720 MaskPtr = &HiMask; 3721 MaskIdx = 1; 3722 LoIdx = 0; 3723 HiIdx = NumElems/2; 3724 } 3725 SDOperand Elt = PermMask.getOperand(i); 3726 if (Elt.getOpcode() == ISD::UNDEF) { 3727 Locs[i] = std::make_pair(-1, -1); 3728 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3729 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3730 (*MaskPtr)[LoIdx] = Elt; 3731 LoIdx++; 3732 } else { 3733 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3734 (*MaskPtr)[HiIdx] = Elt; 3735 HiIdx++; 3736 } 3737 } 3738 3739 SDOperand LoShuffle = 3740 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3741 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3742 &LoMask[0], LoMask.size())); 3743 SDOperand HiShuffle = 3744 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3745 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3746 &HiMask[0], HiMask.size())); 3747 SmallVector<SDOperand, 8> MaskOps; 3748 for (unsigned i = 0; i != NumElems; ++i) { 3749 if (Locs[i].first == -1) { 3750 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3751 } else { 3752 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3753 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3754 } 3755 } 3756 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3757 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3758 &MaskOps[0], MaskOps.size())); 3759 } 3760 3761 return SDOperand(); 3762} 3763 3764SDOperand 3765X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3766 SelectionDAG &DAG) { 3767 MVT::ValueType VT = Op.getValueType(); 3768 if (MVT::getSizeInBits(VT) == 8) { 3769 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3770 Op.getOperand(0), Op.getOperand(1)); 3771 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3772 DAG.getValueType(VT)); 3773 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3774 } else if (MVT::getSizeInBits(VT) == 16) { 3775 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3776 Op.getOperand(0), Op.getOperand(1)); 3777 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  }
  return SDOperand();
}


SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  if (Subtarget->hasSSE41())
    return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;
    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.
      push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getIntPtrConstant(0));
  } else if (MVT::getSizeInBits(VT) == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
                                                  : X86ISD::PINSRW;
    // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
    // argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(Opc, VT, N0, N1, N2);
  } else if (EVT == MVT::f32) {
    // Bits [7:6] of the constant are the source select. This will always be
    // zero here. The DAG Combiner may combine an extract_elt index into these
    // bits. For example (insert (extract, 3), 2) could be matched by putting
    // the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    // value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    // combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4);
    return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
  }
  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EVT == MVT::i8)
    return SDOperand();

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if (MVT::getSizeInBits(EVT) == 16) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
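    // e.g. pinsrw $2, %eax, %xmm0 inserts the low 16 bits of %eax into word
    // lane 2 of %xmm0.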
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  }
  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  MVT::ValueType VT = MVT::v2i32;
  switch (Op.getValueType()) {
  default: break;
  case MVT::v16i8:
  case MVT::v8i16:
    VT = MVT::v4i32;
    break;
  }
  return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt));
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return Result;
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want to
  // load the value at address GV, not the value of GV itself. This means that
  // the GlobalAddress must be in the base or index register of the address, not
  // the GV offset field. The platform check is inside the GVRequiresExtraLoad()
  // call. The same applies for external symbols during PIC codegen.
  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
                         PseudoSourceValue::getGOT(), 0);

  return Result;
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
static SDOperand
LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                              const MVT::ValueType PtrVT) {
  SDOperand InFlag;
  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
                                     DAG.getNode(X86ISD::GlobalBaseReg,
                                                 PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  // emit leal symbol@TLSGD(,%ebx,1), %eax
  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Ops[] = { Chain, TGA, InFlag };
  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
  InFlag = Result.getValue(2);
  Chain = Result.getValue(1);

  // call ___tls_get_addr. This function receives its argument in
  // the register EAX.
  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
  InFlag = Chain.getValue(1);

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops1[] = { Chain,
                       DAG.getTargetExternalSymbol("___tls_get_addr",
                                                   PtrVT),
                       DAG.getRegister(X86::EAX, PtrVT),
                       DAG.getRegister(X86::EBX, PtrVT),
                       InFlag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
  InFlag = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDOperand
LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                    const MVT::ValueType PtrVT) {
  // Get the Thread Pointer.
  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
  // exec)
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);

  if (GA->getGlobal()->isDeclaration())  // initial exec TLS model
    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset,
                         PseudoSourceValue::getGOT(), 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}

SDOperand
X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for PIC executables
  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF and 64-bit targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS model,
  // otherwise use the "Local Exec" TLS model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
  else
    return LowerToTLSExecModel(GA, DAG, getPointerTy());
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT::ValueType VT = Op.getValueType();
  unsigned VTBits = MVT::getSizeInBits(VT);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) :
    DAG.getConstant(0, VT);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4116 } 4117 4118 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4119 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4120 DAG.getConstant(VTBits, MVT::i8)); 4121 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4122 AndNode, DAG.getConstant(0, MVT::i8)); 4123 4124 SDOperand Hi, Lo; 4125 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4126 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4127 SmallVector<SDOperand, 4> Ops; 4128 if (Op.getOpcode() == ISD::SHL_PARTS) { 4129 Ops.push_back(Tmp2); 4130 Ops.push_back(Tmp3); 4131 Ops.push_back(CC); 4132 Ops.push_back(Cond); 4133 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4134 4135 Ops.clear(); 4136 Ops.push_back(Tmp3); 4137 Ops.push_back(Tmp1); 4138 Ops.push_back(CC); 4139 Ops.push_back(Cond); 4140 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4141 } else { 4142 Ops.push_back(Tmp2); 4143 Ops.push_back(Tmp3); 4144 Ops.push_back(CC); 4145 Ops.push_back(Cond); 4146 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4147 4148 Ops.clear(); 4149 Ops.push_back(Tmp3); 4150 Ops.push_back(Tmp1); 4151 Ops.push_back(CC); 4152 Ops.push_back(Cond); 4153 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4154 } 4155 4156 VTs = DAG.getNodeValueTypes(VT, VT); 4157 Ops.clear(); 4158 Ops.push_back(Lo); 4159 Ops.push_back(Hi); 4160 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4161} 4162 4163SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4164 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4165 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4166 "Unknown SINT_TO_FP to lower!"); 4167 4168 // These are really Legal; caller falls through into that case. 4169 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4170 return SDOperand(); 4171 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4172 Subtarget->is64Bit()) 4173 return SDOperand(); 4174 4175 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4176 MachineFunction &MF = DAG.getMachineFunction(); 4177 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4178 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4179 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4180 StackSlot, 4181 PseudoSourceValue::getFixedStack(), 4182 SSFI); 4183 4184 // Build the FILD 4185 SDVTList Tys; 4186 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4187 if (useSSE) 4188 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4189 else 4190 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4191 SmallVector<SDOperand, 8> Ops; 4192 Ops.push_back(Chain); 4193 Ops.push_back(StackSlot); 4194 Ops.push_back(DAG.getValueType(SrcVT)); 4195 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4196 Tys, &Ops[0], Ops.size()); 4197 4198 if (useSSE) { 4199 Chain = Result.getValue(1); 4200 SDOperand InFlag = Result.getValue(2); 4201 4202 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4203 // shouldn't be necessary except that RFP cannot be live across 4204 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
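    // Round-trip the x87 result through memory: FST it to a fresh stack slot,
    // then reload it as the SSE type so it ends up in an XMM register.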
4205 MachineFunction &MF = DAG.getMachineFunction(); 4206 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4207 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4208 Tys = DAG.getVTList(MVT::Other); 4209 SmallVector<SDOperand, 8> Ops; 4210 Ops.push_back(Chain); 4211 Ops.push_back(Result); 4212 Ops.push_back(StackSlot); 4213 Ops.push_back(DAG.getValueType(Op.getValueType())); 4214 Ops.push_back(InFlag); 4215 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4216 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4217 PseudoSourceValue::getFixedStack(), SSFI); 4218 } 4219 4220 return Result; 4221} 4222 4223std::pair<SDOperand,SDOperand> X86TargetLowering:: 4224FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4225 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4226 "Unknown FP_TO_SINT to lower!"); 4227 4228 // These are really Legal. 4229 if (Op.getValueType() == MVT::i32 && 4230 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4231 return std::make_pair(SDOperand(), SDOperand()); 4232 if (Subtarget->is64Bit() && 4233 Op.getValueType() == MVT::i64 && 4234 Op.getOperand(0).getValueType() != MVT::f80) 4235 return std::make_pair(SDOperand(), SDOperand()); 4236 4237 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4238 // stack slot. 4239 MachineFunction &MF = DAG.getMachineFunction(); 4240 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4241 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4242 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4243 unsigned Opc; 4244 switch (Op.getValueType()) { 4245 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4246 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4247 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4248 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4249 } 4250 4251 SDOperand Chain = DAG.getEntryNode(); 4252 SDOperand Value = Op.getOperand(0); 4253 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4254 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4255 Chain = DAG.getStore(Chain, Value, StackSlot, 4256 PseudoSourceValue::getFixedStack(), SSFI); 4257 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4258 SDOperand Ops[] = { 4259 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4260 }; 4261 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4262 Chain = Value.getValue(1); 4263 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4264 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4265 } 4266 4267 // Build the FP_TO_INT*_IN_MEM 4268 SDOperand Ops[] = { Chain, Value, StackSlot }; 4269 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4270 4271 return std::make_pair(FIST, StackSlot); 4272} 4273 4274SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4275 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4276 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4277 if (FIST.Val == 0) return SDOperand(); 4278 4279 // Load the result. 4280 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4281} 4282 4283SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4284 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4285 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4286 if (FIST.Val == 0) return 0; 4287 4288 // Return an i64 load from the stack slot. 
4289 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4290 4291 // Use a MERGE_VALUES node to drop the chain result value. 4292 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4293} 4294 4295SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4296 MVT::ValueType VT = Op.getValueType(); 4297 MVT::ValueType EltVT = VT; 4298 if (MVT::isVector(VT)) 4299 EltVT = MVT::getVectorElementType(VT); 4300 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4301 std::vector<Constant*> CV; 4302 if (EltVT == MVT::f64) { 4303 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4304 CV.push_back(C); 4305 CV.push_back(C); 4306 } else { 4307 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4308 CV.push_back(C); 4309 CV.push_back(C); 4310 CV.push_back(C); 4311 CV.push_back(C); 4312 } 4313 Constant *C = ConstantVector::get(CV); 4314 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4315 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4316 PseudoSourceValue::getConstantPool(), 0, 4317 false, 16); 4318 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4319} 4320 4321SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4322 MVT::ValueType VT = Op.getValueType(); 4323 MVT::ValueType EltVT = VT; 4324 unsigned EltNum = 1; 4325 if (MVT::isVector(VT)) { 4326 EltVT = MVT::getVectorElementType(VT); 4327 EltNum = MVT::getVectorNumElements(VT); 4328 } 4329 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4330 std::vector<Constant*> CV; 4331 if (EltVT == MVT::f64) { 4332 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4333 CV.push_back(C); 4334 CV.push_back(C); 4335 } else { 4336 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4337 CV.push_back(C); 4338 CV.push_back(C); 4339 CV.push_back(C); 4340 CV.push_back(C); 4341 } 4342 Constant *C = ConstantVector::get(CV); 4343 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4344 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4345 PseudoSourceValue::getConstantPool(), 0, 4346 false, 16); 4347 if (MVT::isVector(VT)) { 4348 return DAG.getNode(ISD::BIT_CONVERT, VT, 4349 DAG.getNode(ISD::XOR, MVT::v2i64, 4350 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4351 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4352 } else { 4353 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4354 } 4355} 4356 4357SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4358 SDOperand Op0 = Op.getOperand(0); 4359 SDOperand Op1 = Op.getOperand(1); 4360 MVT::ValueType VT = Op.getValueType(); 4361 MVT::ValueType SrcVT = Op1.getValueType(); 4362 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4363 4364 // If second operand is smaller, extend it first. 4365 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4366 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4367 SrcVT = VT; 4368 SrcTy = MVT::getTypeForValueType(SrcVT); 4369 } 4370 // And if it is bigger, shrink it first. 4371 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4372 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4373 SrcVT = VT; 4374 SrcTy = MVT::getTypeForValueType(SrcVT); 4375 } 4376 4377 // At this point the operands and the result should have the same 4378 // type, and that won't be f80 since that is not custom lowered. 4379 4380 // First get the sign bit of second operand. 
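  // The constant built below is <0x8000000000000000, 0> for f64 (or
  // <0x80000000, 0, 0, 0> for f32), so the FAND isolates Op1's sign bit.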
4381 std::vector<Constant*> CV; 4382 if (SrcVT == MVT::f64) { 4383 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4384 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4385 } else { 4386 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4387 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4388 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4389 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4390 } 4391 Constant *C = ConstantVector::get(CV); 4392 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4393 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4394 PseudoSourceValue::getConstantPool(), 0, 4395 false, 16); 4396 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4397 4398 // Shift sign bit right or left if the two operands have different types. 4399 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4400 // Op0 is MVT::f32, Op1 is MVT::f64. 4401 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4402 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4403 DAG.getConstant(32, MVT::i32)); 4404 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4405 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4406 DAG.getIntPtrConstant(0)); 4407 } 4408 4409 // Clear first operand sign bit. 4410 CV.clear(); 4411 if (VT == MVT::f64) { 4412 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4413 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4414 } else { 4415 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4416 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4417 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4418 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4419 } 4420 C = ConstantVector::get(CV); 4421 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4422 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4423 PseudoSourceValue::getConstantPool(), 0, 4424 false, 16); 4425 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4426 4427 // Or the value with the sign bit. 
4428 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4429} 4430 4431SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4432 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4433 SDOperand Cond; 4434 SDOperand Op0 = Op.getOperand(0); 4435 SDOperand Op1 = Op.getOperand(1); 4436 SDOperand CC = Op.getOperand(2); 4437 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4438 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4439 unsigned X86CC; 4440 4441 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4442 Op0, Op1, DAG)) { 4443 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4444 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4445 DAG.getConstant(X86CC, MVT::i8), Cond); 4446 } 4447 4448 assert(isFP && "Illegal integer SetCC!"); 4449 4450 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4451 switch (SetCCOpcode) { 4452 default: assert(false && "Illegal floating point SetCC!"); 4453 case ISD::SETOEQ: { // !PF & ZF 4454 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4455 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4456 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4457 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4458 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4459 } 4460 case ISD::SETUNE: { // PF | !ZF 4461 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4462 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4463 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4464 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4465 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4466 } 4467 } 4468} 4469 4470 4471SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4472 bool addTest = true; 4473 SDOperand Cond = Op.getOperand(0); 4474 SDOperand CC; 4475 4476 if (Cond.getOpcode() == ISD::SETCC) 4477 Cond = LowerSETCC(Cond, DAG); 4478 4479 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4480 // setting operand in place of the X86ISD::SETCC. 4481 if (Cond.getOpcode() == X86ISD::SETCC) { 4482 CC = Cond.getOperand(0); 4483 4484 SDOperand Cmp = Cond.getOperand(1); 4485 unsigned Opc = Cmp.getOpcode(); 4486 MVT::ValueType VT = Op.getValueType(); 4487 4488 bool IllegalFPCMov = false; 4489 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4490 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4491 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4492 4493 if ((Opc == X86ISD::CMP || 4494 Opc == X86ISD::COMI || 4495 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4496 Cond = Cmp; 4497 addTest = false; 4498 } 4499 } 4500 4501 if (addTest) { 4502 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4503 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4504 } 4505 4506 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4507 MVT::Flag); 4508 SmallVector<SDOperand, 4> Ops; 4509 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4510 // condition is true. 
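  // Hence the operands are pushed in swapped order: the SELECT's false value
  // (operand 2) first, then its true value (operand 1).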
4511 Ops.push_back(Op.getOperand(2));
4512 Ops.push_back(Op.getOperand(1));
4513 Ops.push_back(CC);
4514 Ops.push_back(Cond);
4515 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
4516}
4517
4518SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
4519 bool addTest = true;
4520 SDOperand Chain = Op.getOperand(0);
4521 SDOperand Cond = Op.getOperand(1);
4522 SDOperand Dest = Op.getOperand(2);
4523 SDOperand CC;
4524
4525 if (Cond.getOpcode() == ISD::SETCC)
4526 Cond = LowerSETCC(Cond, DAG);
4527
4528 // If the condition flag is set by an X86ISD::CMP, then use it as the
4529 // condition-setting operand in place of the X86ISD::SETCC.
4530 if (Cond.getOpcode() == X86ISD::SETCC) {
4531 CC = Cond.getOperand(0);
4532
4533 SDOperand Cmp = Cond.getOperand(1);
4534 unsigned Opc = Cmp.getOpcode();
4535 if (Opc == X86ISD::CMP ||
4536 Opc == X86ISD::COMI ||
4537 Opc == X86ISD::UCOMI) {
4538 Cond = Cmp;
4539 addTest = false;
4540 }
4541 }
4542
4543 if (addTest) {
4544 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4545 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
4546 }
4547 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
4548 Chain, Dest, CC, Cond);
4549}
4550
4551
4552// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets.
4553// A call to _alloca is needed to probe the stack when allocating more than 4k
4554// bytes in one go. Touching the stack at 4K increments is necessary to ensure
4555// that the guard pages used by the OS virtual memory manager are allocated in
4556// the correct sequence.
4557SDOperand
4558X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
4559 SelectionDAG &DAG) {
4560 assert(Subtarget->isTargetCygMing() &&
4561 "This should be used only on Cygwin/Mingw targets");
4562
4563 // Get the inputs.
4564 SDOperand Chain = Op.getOperand(0);
4565 SDOperand Size = Op.getOperand(1);
4566 // FIXME: Ensure alignment here
4567
4568 SDOperand Flag;
4569
4570 MVT::ValueType IntPtr = getPointerTy();
4571 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
4572
4573 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
4574 Flag = Chain.getValue(1);
4575
4576 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
4577 SDOperand Ops[] = { Chain,
4578 DAG.getTargetExternalSymbol("_alloca", IntPtr),
4579 DAG.getRegister(X86::EAX, IntPtr),
4580 Flag };
4581 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
4582 Flag = Chain.getValue(1);
4583
4584 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
4585
4586 std::vector<MVT::ValueType> Tys;
4587 Tys.push_back(SPTy);
4588 Tys.push_back(MVT::Other);
4589 SDOperand Ops1[2] = { Chain.getValue(0), Chain };
4590 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
4591}
4592
4593SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
4594 SDOperand InFlag(0, 0);
4595 SDOperand Chain = Op.getOperand(0);
4596 unsigned Align =
4597 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4598 if (Align == 0) Align = 1;
4599
4600 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4601 // If not DWORD aligned or the size is more than the threshold, call memset.
4602 // The libc version is likely to be faster for these cases. It can use the
4603 // address value and run-time information about the CPU.
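  // For example, a misaligned destination or a constant size above
  // getMaxInlineSizeThreshold() takes the libc call below; a non-constant
  // size with a DWORD-aligned destination is always lowered inline,
  // whatever its run-time value.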
4604 if ((Align & 3) != 0 ||
4605 (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
4606 MVT::ValueType IntPtr = getPointerTy();
4607 const Type *IntPtrTy = getTargetData()->getIntPtrType();
4608 TargetLowering::ArgListTy Args;
4609 TargetLowering::ArgListEntry Entry;
4610 Entry.Node = Op.getOperand(1);
4611 Entry.Ty = IntPtrTy;
4612 Args.push_back(Entry);
4613 // Extend the unsigned i8 argument to be an int value for the call.
4614 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
4615 Entry.Ty = IntPtrTy;
4616 Args.push_back(Entry);
4617 Entry.Node = Op.getOperand(3);
4618 Args.push_back(Entry);
4619 std::pair<SDOperand,SDOperand> CallResult =
4620 LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
4621 false, DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
4622 return CallResult.second;
4623 }
4624
4625 MVT::ValueType AVT;
4626 SDOperand Count;
4627 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4628 unsigned BytesLeft = 0;
4629 bool TwoRepStos = false;
4630 if (ValC) {
4631 unsigned ValReg;
4632 uint64_t Val = ValC->getValue() & 255;
4633
4634 // If the value is a constant, then we can potentially use larger stores.
4635 switch (Align & 3) {
4636 case 2: // WORD aligned
4637 AVT = MVT::i16;
4638 ValReg = X86::AX;
4639 Val = (Val << 8) | Val;
4640 break;
4641 case 0: // DWORD aligned
4642 AVT = MVT::i32;
4643 ValReg = X86::EAX;
4644 Val = (Val << 8) | Val;
4645 Val = (Val << 16) | Val;
4646 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned
4647 AVT = MVT::i64;
4648 ValReg = X86::RAX;
4649 Val = (Val << 32) | Val;
4650 }
4651 break;
4652 default: // Byte aligned
4653 AVT = MVT::i8;
4654 ValReg = X86::AL;
4655 Count = Op.getOperand(3);
4656 break;
4657 }
4658
4659 if (AVT > MVT::i8) {
4660 if (I) {
4661 unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4662 Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
4663 BytesLeft = I->getValue() % UBytes;
4664 } else {
4665 assert(AVT >= MVT::i32 &&
4666 "Do not use rep;stos if not at least DWORD aligned");
4667 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4668 Op.getOperand(3), DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8));
4669 TwoRepStos = true;
4670 }
4671 }
4672
4673 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
4674 InFlag);
4675 InFlag = Chain.getValue(1);
4676 } else {
4677 AVT = MVT::i8;
4678 Count = Op.getOperand(3);
4679 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
4680 InFlag = Chain.getValue(1);
4681 }
4682
4683 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4684 Count, InFlag);
4685 InFlag = Chain.getValue(1);
4686 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4687 Op.getOperand(1), InFlag);
4688 InFlag = Chain.getValue(1);
4689
4690 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4691 SmallVector<SDOperand, 8> Ops;
4692 Ops.push_back(Chain);
4693 Ops.push_back(DAG.getValueType(AVT));
4694 Ops.push_back(InFlag);
4695 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4696
4697 if (TwoRepStos) {
4698 InFlag = Chain.getValue(1);
4699 Count = Op.getOperand(3);
4700 MVT::ValueType CVT = Count.getValueType();
4701 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4702 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4703 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4704 Left, InFlag); 4705 InFlag = Chain.getValue(1); 4706 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4707 Ops.clear(); 4708 Ops.push_back(Chain); 4709 Ops.push_back(DAG.getValueType(MVT::i8)); 4710 Ops.push_back(InFlag); 4711 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4712 } else if (BytesLeft) { 4713 // Issue stores for the last 1 - 7 bytes. 4714 SDOperand Value; 4715 unsigned Val = ValC->getValue() & 255; 4716 unsigned Offset = I->getValue() - BytesLeft; 4717 SDOperand DstAddr = Op.getOperand(1); 4718 MVT::ValueType AddrVT = DstAddr.getValueType(); 4719 if (BytesLeft >= 4) { 4720 Val = (Val << 8) | Val; 4721 Val = (Val << 16) | Val; 4722 Value = DAG.getConstant(Val, MVT::i32); 4723 Chain = DAG.getStore(Chain, Value, 4724 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4725 DAG.getConstant(Offset, AddrVT)), 4726 NULL, 0); 4727 BytesLeft -= 4; 4728 Offset += 4; 4729 } 4730 if (BytesLeft >= 2) { 4731 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4732 Chain = DAG.getStore(Chain, Value, 4733 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4734 DAG.getConstant(Offset, AddrVT)), 4735 NULL, 0); 4736 BytesLeft -= 2; 4737 Offset += 2; 4738 } 4739 if (BytesLeft == 1) { 4740 Value = DAG.getConstant(Val, MVT::i8); 4741 Chain = DAG.getStore(Chain, Value, 4742 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4743 DAG.getConstant(Offset, AddrVT)), 4744 NULL, 0); 4745 } 4746 } 4747 4748 return Chain; 4749} 4750 4751SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4752 SDOperand Dest, 4753 SDOperand Source, 4754 unsigned Size, 4755 unsigned Align, 4756 SelectionDAG &DAG) { 4757 MVT::ValueType AVT; 4758 unsigned BytesLeft = 0; 4759 switch (Align & 3) { 4760 case 2: // WORD aligned 4761 AVT = MVT::i16; 4762 break; 4763 case 0: // DWORD aligned 4764 AVT = MVT::i32; 4765 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4766 AVT = MVT::i64; 4767 break; 4768 default: // Byte aligned 4769 AVT = MVT::i8; 4770 break; 4771 } 4772 4773 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4774 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4775 BytesLeft = Size % UBytes; 4776 4777 SDOperand InFlag(0, 0); 4778 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4779 Count, InFlag); 4780 InFlag = Chain.getValue(1); 4781 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4782 Dest, InFlag); 4783 InFlag = Chain.getValue(1); 4784 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4785 Source, InFlag); 4786 InFlag = Chain.getValue(1); 4787 4788 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4789 SmallVector<SDOperand, 8> Ops; 4790 Ops.push_back(Chain); 4791 Ops.push_back(DAG.getValueType(AVT)); 4792 Ops.push_back(InFlag); 4793 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4794 4795 if (BytesLeft) { 4796 // Issue loads and stores for the last 1 - 7 bytes. 
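    // Illustrative example: a 15-byte DWORD-aligned copy issues rep;movsd
    // with a count of 3 (12 bytes), then the code below moves the remaining
    // 3 bytes with one 2-byte and one 1-byte load/store pair.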
4797 unsigned Offset = Size - BytesLeft; 4798 SDOperand DstAddr = Dest; 4799 MVT::ValueType DstVT = DstAddr.getValueType(); 4800 SDOperand SrcAddr = Source; 4801 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4802 SDOperand Value; 4803 if (BytesLeft >= 4) { 4804 Value = DAG.getLoad(MVT::i32, Chain, 4805 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4806 DAG.getConstant(Offset, SrcVT)), 4807 NULL, 0); 4808 Chain = Value.getValue(1); 4809 Chain = DAG.getStore(Chain, Value, 4810 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4811 DAG.getConstant(Offset, DstVT)), 4812 NULL, 0); 4813 BytesLeft -= 4; 4814 Offset += 4; 4815 } 4816 if (BytesLeft >= 2) { 4817 Value = DAG.getLoad(MVT::i16, Chain, 4818 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4819 DAG.getConstant(Offset, SrcVT)), 4820 NULL, 0); 4821 Chain = Value.getValue(1); 4822 Chain = DAG.getStore(Chain, Value, 4823 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4824 DAG.getConstant(Offset, DstVT)), 4825 NULL, 0); 4826 BytesLeft -= 2; 4827 Offset += 2; 4828 } 4829 4830 if (BytesLeft == 1) { 4831 Value = DAG.getLoad(MVT::i8, Chain, 4832 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4833 DAG.getConstant(Offset, SrcVT)), 4834 NULL, 0); 4835 Chain = Value.getValue(1); 4836 Chain = DAG.getStore(Chain, Value, 4837 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4838 DAG.getConstant(Offset, DstVT)), 4839 NULL, 0); 4840 } 4841 } 4842 4843 return Chain; 4844} 4845 4846/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4847SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4848 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4849 SDOperand TheChain = N->getOperand(0); 4850 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4851 if (Subtarget->is64Bit()) { 4852 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4853 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4854 MVT::i64, rax.getValue(2)); 4855 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4856 DAG.getConstant(32, MVT::i8)); 4857 SDOperand Ops[] = { 4858 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4859 }; 4860 4861 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4862 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4863 } 4864 4865 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4866 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4867 MVT::i32, eax.getValue(2)); 4868 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4869 SDOperand Ops[] = { eax, edx }; 4870 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4871 4872 // Use a MERGE_VALUES to return the value and chain. 4873 Ops[1] = edx.getValue(1); 4874 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4875 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4876} 4877 4878SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4879 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4880 4881 if (!Subtarget->is64Bit()) { 4882 // vastart just stores the address of the VarArgsFrameIndex slot into the 4883 // memory location argument. 4884 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4885 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4886 } 4887 4888 // __va_list_tag: 4889 // gp_offset (0 - 6 * 8) 4890 // fp_offset (48 - 48 + 8 * 16) 4891 // overflow_arg_area (point to parameters coming in memory). 
4892 // reg_save_area 4893 SmallVector<SDOperand, 8> MemOps; 4894 SDOperand FIN = Op.getOperand(1); 4895 // Store gp_offset 4896 SDOperand Store = DAG.getStore(Op.getOperand(0), 4897 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4898 FIN, SV, 0); 4899 MemOps.push_back(Store); 4900 4901 // Store fp_offset 4902 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4903 Store = DAG.getStore(Op.getOperand(0), 4904 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4905 FIN, SV, 0); 4906 MemOps.push_back(Store); 4907 4908 // Store ptr to overflow_arg_area 4909 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4910 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4911 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4912 MemOps.push_back(Store); 4913 4914 // Store ptr to reg_save_area. 4915 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4916 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4917 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4918 MemOps.push_back(Store); 4919 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4920} 4921 4922SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4923 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4924 SDOperand Chain = Op.getOperand(0); 4925 SDOperand DstPtr = Op.getOperand(1); 4926 SDOperand SrcPtr = Op.getOperand(2); 4927 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4928 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4929 4930 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 4931 Chain = SrcPtr.getValue(1); 4932 for (unsigned i = 0; i < 3; ++i) { 4933 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 4934 Chain = Val.getValue(1); 4935 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 4936 if (i == 2) 4937 break; 4938 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4939 DAG.getIntPtrConstant(8)); 4940 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4941 DAG.getIntPtrConstant(8)); 4942 } 4943 return Chain; 4944} 4945 4946SDOperand 4947X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4948 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4949 switch (IntNo) { 4950 default: return SDOperand(); // Don't custom lower most intrinsics. 4951 // Comparison intrinsics. 
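  // Each comi*/ucomi* intrinsic below becomes an X86ISD::COMI or
  // X86ISD::UCOMI compare feeding an X86ISD::SETCC. The two differ only in
  // exception behavior: comiss/comisd signal invalid on any NaN operand,
  // ucomiss/ucomisd only on a signaling NaN.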
4952 case Intrinsic::x86_sse_comieq_ss: 4953 case Intrinsic::x86_sse_comilt_ss: 4954 case Intrinsic::x86_sse_comile_ss: 4955 case Intrinsic::x86_sse_comigt_ss: 4956 case Intrinsic::x86_sse_comige_ss: 4957 case Intrinsic::x86_sse_comineq_ss: 4958 case Intrinsic::x86_sse_ucomieq_ss: 4959 case Intrinsic::x86_sse_ucomilt_ss: 4960 case Intrinsic::x86_sse_ucomile_ss: 4961 case Intrinsic::x86_sse_ucomigt_ss: 4962 case Intrinsic::x86_sse_ucomige_ss: 4963 case Intrinsic::x86_sse_ucomineq_ss: 4964 case Intrinsic::x86_sse2_comieq_sd: 4965 case Intrinsic::x86_sse2_comilt_sd: 4966 case Intrinsic::x86_sse2_comile_sd: 4967 case Intrinsic::x86_sse2_comigt_sd: 4968 case Intrinsic::x86_sse2_comige_sd: 4969 case Intrinsic::x86_sse2_comineq_sd: 4970 case Intrinsic::x86_sse2_ucomieq_sd: 4971 case Intrinsic::x86_sse2_ucomilt_sd: 4972 case Intrinsic::x86_sse2_ucomile_sd: 4973 case Intrinsic::x86_sse2_ucomigt_sd: 4974 case Intrinsic::x86_sse2_ucomige_sd: 4975 case Intrinsic::x86_sse2_ucomineq_sd: { 4976 unsigned Opc = 0; 4977 ISD::CondCode CC = ISD::SETCC_INVALID; 4978 switch (IntNo) { 4979 default: break; 4980 case Intrinsic::x86_sse_comieq_ss: 4981 case Intrinsic::x86_sse2_comieq_sd: 4982 Opc = X86ISD::COMI; 4983 CC = ISD::SETEQ; 4984 break; 4985 case Intrinsic::x86_sse_comilt_ss: 4986 case Intrinsic::x86_sse2_comilt_sd: 4987 Opc = X86ISD::COMI; 4988 CC = ISD::SETLT; 4989 break; 4990 case Intrinsic::x86_sse_comile_ss: 4991 case Intrinsic::x86_sse2_comile_sd: 4992 Opc = X86ISD::COMI; 4993 CC = ISD::SETLE; 4994 break; 4995 case Intrinsic::x86_sse_comigt_ss: 4996 case Intrinsic::x86_sse2_comigt_sd: 4997 Opc = X86ISD::COMI; 4998 CC = ISD::SETGT; 4999 break; 5000 case Intrinsic::x86_sse_comige_ss: 5001 case Intrinsic::x86_sse2_comige_sd: 5002 Opc = X86ISD::COMI; 5003 CC = ISD::SETGE; 5004 break; 5005 case Intrinsic::x86_sse_comineq_ss: 5006 case Intrinsic::x86_sse2_comineq_sd: 5007 Opc = X86ISD::COMI; 5008 CC = ISD::SETNE; 5009 break; 5010 case Intrinsic::x86_sse_ucomieq_ss: 5011 case Intrinsic::x86_sse2_ucomieq_sd: 5012 Opc = X86ISD::UCOMI; 5013 CC = ISD::SETEQ; 5014 break; 5015 case Intrinsic::x86_sse_ucomilt_ss: 5016 case Intrinsic::x86_sse2_ucomilt_sd: 5017 Opc = X86ISD::UCOMI; 5018 CC = ISD::SETLT; 5019 break; 5020 case Intrinsic::x86_sse_ucomile_ss: 5021 case Intrinsic::x86_sse2_ucomile_sd: 5022 Opc = X86ISD::UCOMI; 5023 CC = ISD::SETLE; 5024 break; 5025 case Intrinsic::x86_sse_ucomigt_ss: 5026 case Intrinsic::x86_sse2_ucomigt_sd: 5027 Opc = X86ISD::UCOMI; 5028 CC = ISD::SETGT; 5029 break; 5030 case Intrinsic::x86_sse_ucomige_ss: 5031 case Intrinsic::x86_sse2_ucomige_sd: 5032 Opc = X86ISD::UCOMI; 5033 CC = ISD::SETGE; 5034 break; 5035 case Intrinsic::x86_sse_ucomineq_ss: 5036 case Intrinsic::x86_sse2_ucomineq_sd: 5037 Opc = X86ISD::UCOMI; 5038 CC = ISD::SETNE; 5039 break; 5040 } 5041 5042 unsigned X86CC; 5043 SDOperand LHS = Op.getOperand(1); 5044 SDOperand RHS = Op.getOperand(2); 5045 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5046 5047 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5048 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5049 DAG.getConstant(X86CC, MVT::i8), Cond); 5050 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5051 } 5052 } 5053} 5054 5055SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5056 // Depths > 0 not supported yet! 
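  // Returning an empty SDOperand makes the legalizer fall back to its
  // default expansion for the unsupported depths.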
5057 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5058 return SDOperand(); 5059 5060 // Just load the return address 5061 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5062 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5063} 5064 5065SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5066 // Depths > 0 not supported yet! 5067 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5068 return SDOperand(); 5069 5070 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5071 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5072 DAG.getIntPtrConstant(4)); 5073} 5074 5075SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5076 SelectionDAG &DAG) { 5077 // Is not yet supported on x86-64 5078 if (Subtarget->is64Bit()) 5079 return SDOperand(); 5080 5081 return DAG.getIntPtrConstant(8); 5082} 5083 5084SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5085{ 5086 assert(!Subtarget->is64Bit() && 5087 "Lowering of eh_return builtin is not supported yet on x86-64"); 5088 5089 MachineFunction &MF = DAG.getMachineFunction(); 5090 SDOperand Chain = Op.getOperand(0); 5091 SDOperand Offset = Op.getOperand(1); 5092 SDOperand Handler = Op.getOperand(2); 5093 5094 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5095 getPointerTy()); 5096 5097 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5098 DAG.getIntPtrConstant(-4UL)); 5099 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5100 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5101 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5102 MF.getRegInfo().addLiveOut(X86::ECX); 5103 5104 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5105 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5106} 5107 5108SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5109 SelectionDAG &DAG) { 5110 SDOperand Root = Op.getOperand(0); 5111 SDOperand Trmp = Op.getOperand(1); // trampoline 5112 SDOperand FPtr = Op.getOperand(2); // nested function 5113 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5114 5115 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5116 5117 const X86InstrInfo *TII = 5118 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5119 5120 if (Subtarget->is64Bit()) { 5121 SDOperand OutChains[6]; 5122 5123 // Large code-model. 5124 5125 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5126 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5127 5128 const unsigned char N86R10 = 5129 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5130 const unsigned char N86R11 = 5131 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5132 5133 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5134 5135 // Load the pointer to the nested function into R11. 5136 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5137 SDOperand Addr = Trmp; 5138 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5139 TrmpAddr, 0); 5140 5141 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5142 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5143 5144 // Load the 'nest' parameter value into R10. 
5145 // R10 is specified in X86CallingConv.td 5146 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5147 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5148 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5149 TrmpAddr, 10); 5150 5151 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5152 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5153 5154 // Jump to the nested function. 5155 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5156 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5157 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5158 TrmpAddr, 20); 5159 5160 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5161 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5162 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5163 TrmpAddr, 22); 5164 5165 SDOperand Ops[] = 5166 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5167 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5168 } else { 5169 const Function *Func = 5170 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5171 unsigned CC = Func->getCallingConv(); 5172 unsigned NestReg; 5173 5174 switch (CC) { 5175 default: 5176 assert(0 && "Unsupported calling convention"); 5177 case CallingConv::C: 5178 case CallingConv::X86_StdCall: { 5179 // Pass 'nest' parameter in ECX. 5180 // Must be kept in sync with X86CallingConv.td 5181 NestReg = X86::ECX; 5182 5183 // Check that ECX wasn't needed by an 'inreg' parameter. 5184 const FunctionType *FTy = Func->getFunctionType(); 5185 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5186 5187 if (Attrs && !Func->isVarArg()) { 5188 unsigned InRegCount = 0; 5189 unsigned Idx = 1; 5190 5191 for (FunctionType::param_iterator I = FTy->param_begin(), 5192 E = FTy->param_end(); I != E; ++I, ++Idx) 5193 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5194 // FIXME: should only count parameters that are lowered to integers. 5195 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5196 5197 if (InRegCount > 2) { 5198 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5199 abort(); 5200 } 5201 } 5202 break; 5203 } 5204 case CallingConv::X86_FastCall: 5205 // Pass 'nest' parameter in EAX. 
5206 // Must be kept in sync with X86CallingConv.td 5207 NestReg = X86::EAX; 5208 break; 5209 } 5210 5211 SDOperand OutChains[4]; 5212 SDOperand Addr, Disp; 5213 5214 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5215 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5216 5217 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5218 const unsigned char N86Reg = 5219 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg); 5220 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5221 Trmp, TrmpAddr, 0); 5222 5223 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5224 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1); 5225 5226 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5227 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5228 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5229 TrmpAddr, 5, false, 1); 5230 5231 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5232 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); 5233 5234 SDOperand Ops[] = 5235 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5236 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5237 } 5238} 5239 5240SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 5241 /* 5242 The rounding mode is in bits 11:10 of FPSR, and has the following 5243 settings: 5244 00 Round to nearest 5245 01 Round to -inf 5246 10 Round to +inf 5247 11 Round to 0 5248 5249 FLT_ROUNDS, on the other hand, expects the following: 5250 -1 Undefined 5251 0 Round to 0 5252 1 Round to nearest 5253 2 Round to +inf 5254 3 Round to -inf 5255 5256 To perform the conversion, we do: 5257 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5258 */ 5259 5260 MachineFunction &MF = DAG.getMachineFunction(); 5261 const TargetMachine &TM = MF.getTarget(); 5262 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5263 unsigned StackAlignment = TFI.getStackAlignment(); 5264 MVT::ValueType VT = Op.getValueType(); 5265 5266 // Save FP Control Word to stack slot 5267 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5268 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5269 5270 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5271 DAG.getEntryNode(), StackSlot); 5272 5273 // Load FP Control Word from stack slot 5274 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5275 5276 // Transform as necessary 5277 SDOperand CWD1 = 5278 DAG.getNode(ISD::SRL, MVT::i16, 5279 DAG.getNode(ISD::AND, MVT::i16, 5280 CWD, DAG.getConstant(0x800, MVT::i16)), 5281 DAG.getConstant(11, MVT::i8)); 5282 SDOperand CWD2 = 5283 DAG.getNode(ISD::SRL, MVT::i16, 5284 DAG.getNode(ISD::AND, MVT::i16, 5285 CWD, DAG.getConstant(0x400, MVT::i16)), 5286 DAG.getConstant(9, MVT::i8)); 5287 5288 SDOperand RetVal = 5289 DAG.getNode(ISD::AND, MVT::i16, 5290 DAG.getNode(ISD::ADD, MVT::i16, 5291 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5292 DAG.getConstant(1, MVT::i16)), 5293 DAG.getConstant(3, MVT::i16)); 5294 5295 5296 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 
5297 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5298} 5299 5300SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5301 MVT::ValueType VT = Op.getValueType(); 5302 MVT::ValueType OpVT = VT; 5303 unsigned NumBits = MVT::getSizeInBits(VT); 5304 5305 Op = Op.getOperand(0); 5306 if (VT == MVT::i8) { 5307 // Zero extend to i32 since there is not an i8 bsr. 5308 OpVT = MVT::i32; 5309 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5310 } 5311 5312 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5313 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5314 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5315 5316 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5317 SmallVector<SDOperand, 4> Ops; 5318 Ops.push_back(Op); 5319 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5320 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5321 Ops.push_back(Op.getValue(1)); 5322 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5323 5324 // Finally xor with NumBits-1. 5325 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5326 5327 if (VT == MVT::i8) 5328 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5329 return Op; 5330} 5331 5332SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5333 MVT::ValueType VT = Op.getValueType(); 5334 MVT::ValueType OpVT = VT; 5335 unsigned NumBits = MVT::getSizeInBits(VT); 5336 5337 Op = Op.getOperand(0); 5338 if (VT == MVT::i8) { 5339 OpVT = MVT::i32; 5340 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5341 } 5342 5343 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5344 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5345 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5346 5347 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5348 SmallVector<SDOperand, 4> Ops; 5349 Ops.push_back(Op); 5350 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5351 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5352 Ops.push_back(Op.getValue(1)); 5353 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5354 5355 if (VT == MVT::i8) 5356 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5357 return Op; 5358} 5359 5360SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5361 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5362 unsigned Reg = 0; 5363 unsigned size = 0; 5364 switch(T) { 5365 case MVT::i8: Reg = X86::AL; size = 1; break; 5366 case MVT::i16: Reg = X86::AX; size = 2; break; 5367 case MVT::i32: Reg = X86::EAX; size = 4; break; 5368 case MVT::i64: 5369 if (Subtarget->is64Bit()) { 5370 Reg = X86::RAX; size = 8; 5371 } else //Should go away when LowerType stuff lands 5372 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5373 break; 5374 }; 5375 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5376 Op.getOperand(3), SDOperand()); 5377 SDOperand Ops[] = { cpIn.getValue(0), 5378 Op.getOperand(1), 5379 Op.getOperand(2), 5380 DAG.getTargetConstant(size, MVT::i8), 5381 cpIn.getValue(1) }; 5382 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5383 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5384 SDOperand cpOut = 5385 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5386 return cpOut; 5387} 5388 5389SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5390 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5391 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5392 SDOperand cpInL, cpInH; 5393 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5394 DAG.getConstant(0, 
MVT::i32)); 5395 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5396 DAG.getConstant(1, MVT::i32)); 5397 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5398 cpInL, SDOperand()); 5399 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5400 cpInH, cpInL.getValue(1)); 5401 SDOperand swapInL, swapInH; 5402 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5403 DAG.getConstant(0, MVT::i32)); 5404 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5405 DAG.getConstant(1, MVT::i32)); 5406 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5407 swapInL, cpInH.getValue(1)); 5408 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5409 swapInH, swapInL.getValue(1)); 5410 SDOperand Ops[] = { swapInH.getValue(0), 5411 Op->getOperand(1), 5412 swapInH.getValue(1)}; 5413 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5414 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5415 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5416 Result.getValue(1)); 5417 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5418 cpOutL.getValue(2)); 5419 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5420 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5421 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5422 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5423} 5424 5425/// LowerOperation - Provide custom lowering hooks for some operations. 5426/// 5427SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5428 switch (Op.getOpcode()) { 5429 default: assert(0 && "Should not custom lower this!"); 5430 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5431 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5432 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5433 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5434 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5435 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5436 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5437 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5438 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5439 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5440 case ISD::SHL_PARTS: 5441 case ISD::SRA_PARTS: 5442 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5443 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5444 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5445 case ISD::FABS: return LowerFABS(Op, DAG); 5446 case ISD::FNEG: return LowerFNEG(Op, DAG); 5447 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5448 case ISD::SETCC: return LowerSETCC(Op, DAG); 5449 case ISD::SELECT: return LowerSELECT(Op, DAG); 5450 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5451 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5452 case ISD::CALL: return LowerCALL(Op, DAG); 5453 case ISD::RET: return LowerRET(Op, DAG); 5454 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5455 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5456 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5457 case ISD::VASTART: return LowerVASTART(Op, DAG); 5458 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5459 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5460 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5461 case ISD::FRAMEADDR: 
return LowerFRAMEADDR(Op, DAG); 5462 case ISD::FRAME_TO_ARGS_OFFSET: 5463 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5464 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5465 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5466 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5467 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5468 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5469 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5470 5471 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5472 case ISD::READCYCLECOUNTER: 5473 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5474 } 5475} 5476 5477/// ExpandOperation - Provide custom lowering hooks for expanding operations. 5478SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5479 switch (N->getOpcode()) { 5480 default: assert(0 && "Should not custom lower this!"); 5481 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5482 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5483 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5484 } 5485} 5486 5487const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5488 switch (Opcode) { 5489 default: return NULL; 5490 case X86ISD::BSF: return "X86ISD::BSF"; 5491 case X86ISD::BSR: return "X86ISD::BSR"; 5492 case X86ISD::SHLD: return "X86ISD::SHLD"; 5493 case X86ISD::SHRD: return "X86ISD::SHRD"; 5494 case X86ISD::FAND: return "X86ISD::FAND"; 5495 case X86ISD::FOR: return "X86ISD::FOR"; 5496 case X86ISD::FXOR: return "X86ISD::FXOR"; 5497 case X86ISD::FSRL: return "X86ISD::FSRL"; 5498 case X86ISD::FILD: return "X86ISD::FILD"; 5499 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5500 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5501 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5502 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5503 case X86ISD::FLD: return "X86ISD::FLD"; 5504 case X86ISD::FST: return "X86ISD::FST"; 5505 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 5506 case X86ISD::FP_GET_RESULT2: return "X86ISD::FP_GET_RESULT2"; 5507 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 5508 case X86ISD::CALL: return "X86ISD::CALL"; 5509 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5510 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5511 case X86ISD::CMP: return "X86ISD::CMP"; 5512 case X86ISD::COMI: return "X86ISD::COMI"; 5513 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5514 case X86ISD::SETCC: return "X86ISD::SETCC"; 5515 case X86ISD::CMOV: return "X86ISD::CMOV"; 5516 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5517 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5518 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5519 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5520 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5521 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5522 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5523 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5524 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5525 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5526 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5527 case X86ISD::FMAX: return "X86ISD::FMAX"; 5528 case X86ISD::FMIN: return "X86ISD::FMIN"; 5529 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5530 case X86ISD::FRCP: return "X86ISD::FRCP"; 5531 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5532 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5533 case 
X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
5534 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
5535 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
5536 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
5537 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
5538 }
5539}
5540
5541// isLegalAddressingMode - Return true if the addressing mode represented
5542// by AM is legal for this target, for a load/store of the specified type.
5543bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
5544 const Type *Ty) const {
5545 // X86 supports extremely general addressing modes.
5546
5547 // X86 allows a sign-extended 32-bit immediate field as a displacement.
5548 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
5549 return false;
5550
5551 if (AM.BaseGV) {
5552 // We can only fold this if we don't need an extra load.
5553 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
5554 return false;
5555
5556 // X86-64 only supports addr of globals in small code model.
5557 if (Subtarget->is64Bit()) {
5558 if (getTargetMachine().getCodeModel() != CodeModel::Small)
5559 return false;
5560 // If lower 4G is not available, then we must use rip-relative addressing.
5561 if (AM.BaseOffs || AM.Scale > 1)
5562 return false;
5563 }
5564 }
5565
5566 switch (AM.Scale) {
5567 case 0:
5568 case 1:
5569 case 2:
5570 case 4:
5571 case 8:
5572 // These scales always work.
5573 break;
5574 case 3:
5575 case 5:
5576 case 9:
5577 // These scales are formed with basereg+scalereg. Only accept if there is
5578 // no basereg yet.
5579 if (AM.HasBaseReg)
5580 return false;
5581 break;
5582 default: // Other stuff never works.
5583 return false;
5584 }
5585
5586 return true;
5587}
5588
5589
5590bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
5591 if (!Ty1->isInteger() || !Ty2->isInteger())
5592 return false;
5593 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
5594 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
5595 if (NumBits1 <= NumBits2)
5596 return false;
5597 return Subtarget->is64Bit() || NumBits1 < 64;
5598}
5599
5600bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
5601 MVT::ValueType VT2) const {
5602 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
5603 return false;
5604 unsigned NumBits1 = MVT::getSizeInBits(VT1);
5605 unsigned NumBits2 = MVT::getSizeInBits(VT2);
5606 if (NumBits1 <= NumBits2)
5607 return false;
5608 return Subtarget->is64Bit() || NumBits1 < 64;
5609}
5610
5611/// isShuffleMaskLegal - Targets can use this to indicate that they only
5612/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
5613/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
5614/// are assumed to be legal.
5615bool
5616X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
5617 // Only do shuffles on 128-bit vector types for now.
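  // (64-bit vector types are the MMX types; rejecting them keeps the
  // legalizer from forming MMX shuffles that this code does not handle.)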
5618 if (MVT::getSizeInBits(VT) == 64) return false; 5619 return (Mask.Val->getNumOperands() <= 4 || 5620 isIdentityMask(Mask.Val) || 5621 isIdentityMask(Mask.Val, true) || 5622 isSplatMask(Mask.Val) || 5623 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5624 X86::isUNPCKLMask(Mask.Val) || 5625 X86::isUNPCKHMask(Mask.Val) || 5626 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5627 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5628} 5629 5630bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5631 MVT::ValueType EVT, 5632 SelectionDAG &DAG) const { 5633 unsigned NumElts = BVOps.size(); 5634 // Only do shuffles on 128-bit vector types for now. 5635 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5636 if (NumElts == 2) return true; 5637 if (NumElts == 4) { 5638 return (isMOVLMask(&BVOps[0], 4) || 5639 isCommutedMOVL(&BVOps[0], 4, true) || 5640 isSHUFPMask(&BVOps[0], 4) || 5641 isCommutedSHUFP(&BVOps[0], 4)); 5642 } 5643 return false; 5644} 5645 5646//===----------------------------------------------------------------------===// 5647// X86 Scheduler Hooks 5648//===----------------------------------------------------------------------===// 5649 5650MachineBasicBlock * 5651X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5652 MachineBasicBlock *BB) { 5653 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5654 switch (MI->getOpcode()) { 5655 default: assert(false && "Unexpected instr type to insert"); 5656 case X86::CMOV_FR32: 5657 case X86::CMOV_FR64: 5658 case X86::CMOV_V4F32: 5659 case X86::CMOV_V2F64: 5660 case X86::CMOV_V2I64: { 5661 // To "insert" a SELECT_CC instruction, we actually have to insert the 5662 // diamond control-flow pattern. The incoming instruction knows the 5663 // destination vreg to set, the condition code register to branch on, the 5664 // true/false values to select between, and a branch opcode to use. 5665 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5666 ilist<MachineBasicBlock>::iterator It = BB; 5667 ++It; 5668 5669 // thisMBB: 5670 // ... 5671 // TrueVal = ... 5672 // cmpTY ccX, r1, r2 5673 // bCC copy1MBB 5674 // fallthrough --> copy0MBB 5675 MachineBasicBlock *thisMBB = BB; 5676 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5677 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5678 unsigned Opc = 5679 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5680 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5681 MachineFunction *F = BB->getParent(); 5682 F->getBasicBlockList().insert(It, copy0MBB); 5683 F->getBasicBlockList().insert(It, sinkMBB); 5684 // Update machine-CFG edges by first adding all successors of the current 5685 // block to the new block which will contain the Phi node for the select. 5686 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5687 e = BB->succ_end(); i != e; ++i) 5688 sinkMBB->addSuccessor(*i); 5689 // Next, remove all successors of the current block, and add the true 5690 // and fallthrough blocks as its successors. 5691 while(!BB->succ_empty()) 5692 BB->removeSuccessor(BB->succ_begin()); 5693 BB->addSuccessor(copy0MBB); 5694 BB->addSuccessor(sinkMBB); 5695 5696 // copy0MBB: 5697 // %FalseValue = ... 5698 // # fallthrough to sinkMBB 5699 BB = copy0MBB; 5700 5701 // Update machine-CFG edges 5702 BB->addSuccessor(sinkMBB); 5703 5704 // sinkMBB: 5705 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5706 // ... 
5707 BB = sinkMBB; 5708 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5709 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5710 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5711 5712 delete MI; // The pseudo instruction is gone now. 5713 return BB; 5714 } 5715 5716 case X86::FP32_TO_INT16_IN_MEM: 5717 case X86::FP32_TO_INT32_IN_MEM: 5718 case X86::FP32_TO_INT64_IN_MEM: 5719 case X86::FP64_TO_INT16_IN_MEM: 5720 case X86::FP64_TO_INT32_IN_MEM: 5721 case X86::FP64_TO_INT64_IN_MEM: 5722 case X86::FP80_TO_INT16_IN_MEM: 5723 case X86::FP80_TO_INT32_IN_MEM: 5724 case X86::FP80_TO_INT64_IN_MEM: { 5725 // Change the floating point control register to use "round towards zero" 5726 // mode when truncating to an integer value. 5727 MachineFunction *F = BB->getParent(); 5728 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5729 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5730 5731 // Load the old value of the high byte of the control word... 5732 unsigned OldCW = 5733 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5734 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5735 5736 // Set the high part to be round to zero... 5737 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5738 .addImm(0xC7F); 5739 5740 // Reload the modified control word now... 5741 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5742 5743 // Restore the memory image of control word to original value 5744 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5745 .addReg(OldCW); 5746 5747 // Get the X86 opcode to use. 5748 unsigned Opc; 5749 switch (MI->getOpcode()) { 5750 default: assert(0 && "illegal opcode!"); 5751 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5752 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5753 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5754 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5755 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5756 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5757 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5758 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5759 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5760 } 5761 5762 X86AddressMode AM; 5763 MachineOperand &Op = MI->getOperand(0); 5764 if (Op.isRegister()) { 5765 AM.BaseType = X86AddressMode::RegBase; 5766 AM.Base.Reg = Op.getReg(); 5767 } else { 5768 AM.BaseType = X86AddressMode::FrameIndexBase; 5769 AM.Base.FrameIndex = Op.getIndex(); 5770 } 5771 Op = MI->getOperand(1); 5772 if (Op.isImmediate()) 5773 AM.Scale = Op.getImm(); 5774 Op = MI->getOperand(2); 5775 if (Op.isImmediate()) 5776 AM.IndexReg = Op.getImm(); 5777 Op = MI->getOperand(3); 5778 if (Op.isGlobalAddress()) { 5779 AM.GV = Op.getGlobal(); 5780 } else { 5781 AM.Disp = Op.getImm(); 5782 } 5783 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5784 .addReg(MI->getOperand(4).getReg()); 5785 5786 // Reload the original control word now. 5787 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5788 5789 delete MI; // The pseudo instruction is gone now. 
5790 return BB; 5791 } 5792 } 5793} 5794 5795//===----------------------------------------------------------------------===// 5796// X86 Optimization Hooks 5797//===----------------------------------------------------------------------===// 5798 5799void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5800 const APInt &Mask, 5801 APInt &KnownZero, 5802 APInt &KnownOne, 5803 const SelectionDAG &DAG, 5804 unsigned Depth) const { 5805 unsigned Opc = Op.getOpcode(); 5806 assert((Opc >= ISD::BUILTIN_OP_END || 5807 Opc == ISD::INTRINSIC_WO_CHAIN || 5808 Opc == ISD::INTRINSIC_W_CHAIN || 5809 Opc == ISD::INTRINSIC_VOID) && 5810 "Should use MaskedValueIsZero if you don't know whether Op" 5811 " is a target node!"); 5812 5813 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 5814 switch (Opc) { 5815 default: break; 5816 case X86ISD::SETCC: 5817 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 5818 Mask.getBitWidth() - 1); 5819 break; 5820 } 5821} 5822 5823/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5824/// element of the result of the vector shuffle. 5825static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5826 MVT::ValueType VT = N->getValueType(0); 5827 SDOperand PermMask = N->getOperand(2); 5828 unsigned NumElems = PermMask.getNumOperands(); 5829 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5830 i %= NumElems; 5831 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5832 return (i == 0) 5833 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5834 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5835 SDOperand Idx = PermMask.getOperand(i); 5836 if (Idx.getOpcode() == ISD::UNDEF) 5837 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5838 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5839 } 5840 return SDOperand(); 5841} 5842 5843/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5844/// node is a GlobalAddress + an offset. 5845static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5846 unsigned Opc = N->getOpcode(); 5847 if (Opc == X86ISD::Wrapper) { 5848 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5849 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5850 return true; 5851 } 5852 } else if (Opc == ISD::ADD) { 5853 SDOperand N1 = N->getOperand(0); 5854 SDOperand N2 = N->getOperand(1); 5855 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5856 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5857 if (V) { 5858 Offset += V->getSignExtended(); 5859 return true; 5860 } 5861 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5862 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5863 if (V) { 5864 Offset += V->getSignExtended(); 5865 return true; 5866 } 5867 } 5868 } 5869 return false; 5870} 5871 5872/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5873/// + Dist * Size. 
5874static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5875 MachineFrameInfo *MFI) { 5876 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5877 return false; 5878 5879 SDOperand Loc = N->getOperand(1); 5880 SDOperand BaseLoc = Base->getOperand(1); 5881 if (Loc.getOpcode() == ISD::FrameIndex) { 5882 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5883 return false; 5884 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5885 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5886 int FS = MFI->getObjectSize(FI); 5887 int BFS = MFI->getObjectSize(BFI); 5888 if (FS != BFS || FS != Size) return false; 5889 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5890 } else { 5891 GlobalValue *GV1 = NULL; 5892 GlobalValue *GV2 = NULL; 5893 int64_t Offset1 = 0; 5894 int64_t Offset2 = 0; 5895 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5896 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5897 if (isGA1 && isGA2 && GV1 == GV2) 5898 return Offset1 == (Offset2 + Dist*Size); 5899 } 5900 5901 return false; 5902} 5903 5904static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5905 const X86Subtarget *Subtarget) { 5906 GlobalValue *GV; 5907 int64_t Offset = 0; 5908 if (isGAPlusOffset(Base, GV, Offset)) 5909 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5910 // DAG combine handles the stack object case. 5911 return false; 5912} 5913 5914 5915/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5916/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5917/// if the load addresses are consecutive, non-overlapping, and in the right 5918/// order. 5919static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5920 const X86Subtarget *Subtarget) { 5921 MachineFunction &MF = DAG.getMachineFunction(); 5922 MachineFrameInfo *MFI = MF.getFrameInfo(); 5923 MVT::ValueType VT = N->getValueType(0); 5924 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5925 SDOperand PermMask = N->getOperand(2); 5926 int NumElems = (int)PermMask.getNumOperands(); 5927 SDNode *Base = NULL; 5928 for (int i = 0; i < NumElems; ++i) { 5929 SDOperand Idx = PermMask.getOperand(i); 5930 if (Idx.getOpcode() == ISD::UNDEF) { 5931 if (!Base) return SDOperand(); 5932 } else { 5933 SDOperand Arg = 5934 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5935 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5936 return SDOperand(); 5937 if (!Base) 5938 Base = Arg.Val; 5939 else if (!isConsecutiveLoad(Arg.Val, Base, 5940 i, MVT::getSizeInBits(EVT)/8,MFI)) 5941 return SDOperand(); 5942 } 5943 } 5944 5945 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5946 LoadSDNode *LD = cast<LoadSDNode>(Base); 5947 if (isAlign16) { 5948 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5949 LD->getSrcValueOffset(), LD->isVolatile()); 5950 } else { 5951 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5952 LD->getSrcValueOffset(), LD->isVolatile(), 5953 LD->getAlignment()); 5954 } 5955} 5956 5957/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5958static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5959 const X86Subtarget *Subtarget) { 5960 SDOperand Cond = N->getOperand(0); 5961 5962 // If we have SSE[12] support, try to form min/max nodes. 
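  // minss/maxss return their second operand when the comparison is
  // unordered, which matches the strict-inequality selects exactly; the
  // LE/GE forms below are only equivalent when NaNs can be ignored, hence
  // the UnsafeFPMath guards.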
5963 if (Subtarget->hasSSE2() && 5964 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5965 if (Cond.getOpcode() == ISD::SETCC) { 5966 // Get the LHS/RHS of the select. 5967 SDOperand LHS = N->getOperand(1); 5968 SDOperand RHS = N->getOperand(2); 5969 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5970 5971 unsigned Opcode = 0; 5972 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5973 switch (CC) { 5974 default: break; 5975 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5976 case ISD::SETULE: 5977 case ISD::SETLE: 5978 if (!UnsafeFPMath) break; 5979 // FALL THROUGH. 5980 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5981 case ISD::SETLT: 5982 Opcode = X86ISD::FMIN; 5983 break; 5984 5985 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5986 case ISD::SETUGT: 5987 case ISD::SETGT: 5988 if (!UnsafeFPMath) break; 5989 // FALL THROUGH. 5990 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5991 case ISD::SETGE: 5992 Opcode = X86ISD::FMAX; 5993 break; 5994 } 5995 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5996 switch (CC) { 5997 default: break; 5998 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5999 case ISD::SETUGT: 6000 case ISD::SETGT: 6001 if (!UnsafeFPMath) break; 6002 // FALL THROUGH. 6003 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 6004 case ISD::SETGE: 6005 Opcode = X86ISD::FMIN; 6006 break; 6007 6008 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 6009 case ISD::SETULE: 6010 case ISD::SETLE: 6011 if (!UnsafeFPMath) break; 6012 // FALL THROUGH. 6013 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 6014 case ISD::SETLT: 6015 Opcode = X86ISD::FMAX; 6016 break; 6017 } 6018 } 6019 6020 if (Opcode) 6021 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 6022 } 6023 6024 } 6025 6026 return SDOperand(); 6027} 6028 6029/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 6030static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG, 6031 const X86Subtarget *Subtarget) { 6032 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 6033 // the FP state in cases where an emms may be missing. 6034 // A preferable solution to the general problem is to figure out the right 6035 // places to insert EMMS. This qualifies as a quick hack. 6036 if (MVT::isVector(St->getValue().getValueType()) && 6037 MVT::getSizeInBits(St->getValue().getValueType()) == 64 && 6038 isa<LoadSDNode>(St->getValue()) && 6039 !cast<LoadSDNode>(St->getValue())->isVolatile() && 6040 St->getChain().hasOneUse() && !St->isVolatile()) { 6041 SDNode* LdVal = St->getValue().Val; 6042 LoadSDNode *Ld = 0; 6043 int TokenFactorIndex = -1; 6044 SmallVector<SDOperand, 8> Ops; 6045 SDNode* ChainVal = St->getChain().Val; 6046 // Must be a store of a load. We currently handle two cases: the load 6047 // is a direct child, and it's under an intervening TokenFactor. It is 6048 // possible to dig deeper under nested TokenFactors. 6049 if (ChainVal == LdVal) 6050 Ld = cast<LoadSDNode>(St->getChain()); 6051 else if (St->getValue().hasOneUse() && 6052 ChainVal->getOpcode() == ISD::TokenFactor) { 6053 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 6054 if (ChainVal->getOperand(i).Val == LdVal) { 6055 TokenFactorIndex = i; 6056 Ld = cast<LoadSDNode>(St->getValue()); 6057 } else 6058 Ops.push_back(ChainVal->getOperand(i)); 6059 } 6060 } 6061 if (Ld) { 6062 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 
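    // A GR64 load/store (or the GR32 pair in the 32-bit path below) touches
    // no MMX state, so no EMMS is required around the new sequence.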
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewLd);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(),
                                    St->getSrcValueOffset()+4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
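/// FAND is the target node used for fabs-style sign-bit masking (it selects
/// to andps/andpd).  Since +0.0 has an all-zero bit pattern, and'ing with it
/// always yields +0.0, so e.g. (X86ISD::FAND X, 0.0) folds to the 0.0
/// operand outright.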
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}

SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// lowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
                                         std::string &s) const {
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      s = "Y";
    else if (Subtarget->hasSSE1())
      s = "x";
    else
      s = "f";
  } else
    return TargetLowering::lowerXConstraint(ConstraintVT, s);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
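    // For example (hypothetical user code), asm volatile("int %0" : : "i"(3))
    // reaches here with a ConstantSDNode for 3.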
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the commuted form, (C+GA).
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
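      // e.g. (hypothetical user code) asm("movq %0, %%mm1" : : "y"(v))
      // pins v to one of the eight MMX registers.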
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
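  // For example, an i8 operand constrained to "{ax}" comes back from the
  // default lookup as AX/GR16; the code below remaps it to AL/GR8 (and
  // similarly i32 -> EAX, i64 -> RAX).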
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}