X86ISelLowering.cpp revision b8cafe3427a168414400e5dfcbea78996792d2c3
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParamAttrsList.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  if (!Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LCS, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LCS, MVT::i64, Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt); // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt); // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2); // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType(ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack. Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_ST0, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_ST0, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_ST0_ST1, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention seems to be the standard for many Windows
//  API routines. It differs from the C calling convention just a little: the
//  callee should clean up the stack, not the caller. Symbols should also be
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation, LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses
/// struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support
/// tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call. Currently the implementation of this call is very conservative and
/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
/// virtual registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
                                                    MachineFrameInfo *MFI) {
  RegisterSDNode *OpReg = NULL;
  FrameIndexSDNode *FrameIdxNode = NULL;
  int FrameIdx = 0;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       (FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op.getOperand(1))) &&
       (MFI->isFixedObjectIndex((FrameIdx = FrameIdxNode->getIndex()))) &&
       (MFI->getObjectOffset(FrameIdx) >= 0)))
    return true;
  return false;
}

/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
/// in a register before calling.
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
  return !IsTailCall && !Is64Bit &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CallRequiresFnAddressInReg - Check whether the call requires the function
/// address to be loaded in a register.
bool
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
  return !Is64Bit && IsTailCall &&
         getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT();
}

/// CopyTailCallClobberedArgumentsToVRegs - Create virtual registers for all
/// arguments to force loading and guarantee that arguments sourcing from
/// incoming parameters are not overwriting each other.
static SDOperand
CopyTailCallClobberedArgumentsToVRegs(SDOperand Chain,
    SmallVector<std::pair<unsigned, SDOperand>, 8> &TailCallClobberedVRegs,
    SelectionDAG &DAG,
    MachineFunction &MF,
    const TargetLowering *TL) {

  SDOperand InFlag;
  for (unsigned i = 0, e = TailCallClobberedVRegs.size(); i != e; i++) {
    SDOperand Arg = TailCallClobberedVRegs[i].second;
    unsigned Idx = TailCallClobberedVRegs[i].first;
    unsigned VReg =
      MF.getRegInfo().
        createVirtualRegister(TL->getRegClassFor(Arg.getValueType()));
    Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
    InFlag = Chain.getValue(1);
    Arg = DAG.getCopyFromReg(Chain, VReg, Arg.getValueType(), InFlag);
    TailCallClobberedVRegs[i] = std::make_pair(Idx, Arg);
    Chain = Arg.getValue(1);
    InFlag = Arg.getValue(2);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          ISD::ParamFlags::ParamFlagsTy Flags,
                          SelectionDAG &DAG) {
  // The byval alignment and size are packed into the parameter flags word;
  // unpack them before building the memcpy.
  unsigned Align = ISD::ParamFlags::One <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              unsigned CC,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ParamFlags::ParamFlagsTy Flags =
    cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool AlwaysUseMutable = (CC == CallingConv::Fast) && PerformTailCallOpt;
  bool isByVal = Flags & ISD::ParamFlags::ByVal;
  bool isImmutable = !AlwaysUseMutable && !isByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization, mark
  // all arguments mutable, since they could be overwritten by the lowering of
  // the arguments in case of a tail call.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), isImmutable);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the instruction takes 8n+4 bytes, so that the start of the
  // arguments and the arguments after the retaddr has been pushed are
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;   // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
1371 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
1372 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
1373}
1374
1375 SDOperand
1376 X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
1377 const SDOperand &StackPtr,
1378 const CCValAssign &VA,
1379 SDOperand Chain,
1380 SDOperand Arg) {
1381 unsigned LocMemOffset = VA.getLocMemOffset();
1382 SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
1383 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1384 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1385 ISD::ParamFlags::ParamFlagsTy Flags =
1386 cast<ConstantSDNode>(FlagsOp)->getValue();
1387 if (Flags & ISD::ParamFlags::ByVal) {
1388 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
1389 }
1390 return DAG.getStore(Chain, Arg, PtrOff,
1391 PseudoSourceValue::getStack(), LocMemOffset);
1392}
1393
1394 /// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64
1395 /// struct return call to the specified function. The X86-64 ABI specifies that
1396 /// some SRet calls are actually returned in registers. Since current
1397 /// LLVM cannot represent multi-value calls, they are represented as
1398 /// calls where the results are passed in a hidden struct provided by
1399 /// the caller. This function examines the type of the struct to
1400 /// determine the correct way to implement the call.
1401 X86::X86_64SRet
1402 X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
1403 // FIXME: Disabled for now.
1404 return X86::InMemory;
1405
1406 const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType());
1407 const Type *RTy = PTy->getElementType();
1408 unsigned Size = getTargetData()->getABITypeSize(RTy);
1409 if (Size != 16 && Size != 32)
1410 return X86::InMemory;
1411
1412 if (Size == 32) {
1413 const StructType *STy = dyn_cast<StructType>(RTy);
1414 if (!STy) return X86::InMemory;
1415 if (STy->getNumElements() == 2 &&
1416 STy->getElementType(0) == Type::X86_FP80Ty &&
1417 STy->getElementType(1) == Type::X86_FP80Ty)
1418 return X86::InX87;
1419 }
1420
1421 bool AllFP = true;
1422 for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end();
1423 I != E; ++I) {
1424 const Type *STy = I->get();
1425 if (!STy->isFPOrFPVector()) {
1426 AllFP = false;
1427 break;
1428 }
1429 }
1430
1431 if (AllFP)
1432 return X86::InSSE;
1433 return X86::InGPR64;
1434}
1435
1436 void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall,
1437 CCAssignFn *Fn,
1438 CCState &CCInfo) {
1439 unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
1440 for (unsigned i = 1; i != NumOps; ++i) {
1441 MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType();
1442 SDOperand FlagOp = TheCall->getOperand(5+2*i+1);
1443 unsigned ArgFlags = cast<ConstantSDNode>(FlagOp)->getValue();
1444 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) {
1445 cerr << "Call operand #" << i << " has unhandled type "
1446 << MVT::getValueTypeString(ArgVT) << "\n";
1447 abort();
1448 }
1449 }
1450}
1451
1452 SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
1453 MachineFunction &MF = DAG.getMachineFunction();
1454 MachineFrameInfo *MFI = MF.getFrameInfo();
1455 SDOperand Chain = Op.getOperand(0);
1456 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1457 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1458 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
1459 && CC == CallingConv::Fast && PerformTailCallOpt;
1460 SDOperand
Callee = Op.getOperand(4);
1461 bool Is64Bit = Subtarget->is64Bit();
1462 bool IsStructRet = CallIsStructReturn(Op);
1463
1464 assert(!(isVarArg && CC == CallingConv::Fast) &&
1465 "Var args not supported with calling convention fastcc");
1466
1467 // Analyze operands of the call, assigning locations to each operand.
1468 SmallVector<CCValAssign, 16> ArgLocs;
1469 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1470 CCAssignFn *CCFn = CCAssignFnForNode(Op);
1471
1472 X86::X86_64SRet SRetMethod = X86::InMemory;
1473 if (Is64Bit && IsStructRet)
1474 // FIXME: We can't figure out the type of the sret structure for indirect
1475 // calls. We need to copy more information from CallSite to the ISD::CALL
1476 // node.
1477 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1478 SRetMethod =
1479 ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal()));
1480
1481 // UGLY HACK! For x86-64, some 128-bit aggregates are returned in a pair of
1482 // registers. Unfortunately, LLVM does not support i128 yet, so we pretend
1483 // it's an sret call.
1484 if (SRetMethod != X86::InMemory)
1485 X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo);
1486 else
1487 CCInfo.AnalyzeCallOperands(Op.Val, CCFn);
1488
1489 // Get a count of how many bytes are to be pushed on the stack.
1490 unsigned NumBytes = CCInfo.getNextStackOffset();
1491 if (CC == CallingConv::Fast)
1492 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
1493
1494 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
1495 // arguments and the arguments after the retaddr has been pushed are aligned.
1496 if (!Is64Bit && CC == CallingConv::X86_FastCall &&
1497 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
1498 (NumBytes & 7) == 0)
1499 NumBytes += 4;
1500
1501 int FPDiff = 0;
1502 if (IsTailCall) {
1503 // Lower arguments at fp - stackoffset + fpdiff.
1504 unsigned NumBytesCallerPushed =
1505 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1506 FPDiff = NumBytesCallerPushed - NumBytes;
1507
1508 // Set the delta of movement of the returnaddr stackslot.
1509 // But only set if delta is greater than previous delta.
1510 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
1511 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
1512 }
1513
1514 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));
1515
1516 SDOperand RetAddrFrIdx;
1517 if (IsTailCall) {
1518 // Adjust the Return address stack slot.
1519 if (FPDiff) {
1520 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
1521 RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
1522 // Load the "old" Return address.
1523 RetAddrFrIdx =
1524 DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
1525 Chain = SDOperand(RetAddrFrIdx.Val, 1);
1526 }
1527 }
1528
1529 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1530 SmallVector<std::pair<unsigned, SDOperand>, 8> TailCallClobberedVRegs;
1531 SmallVector<SDOperand, 8> MemOpChains;
1532
1533 SDOperand StackPtr;
1534
1535 // Walk the register/memloc assignments, inserting copies/loads. For tail
1536 // calls, remember all arguments for later special lowering.
1537 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1538 CCValAssign &VA = ArgLocs[i];
1539 SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1540
1541 // Promote the value if needed.
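// For example (an illustrative sketch, not original code), an i8 argument
// assigned an i32 location with SExt promotion is widened by the switch
// below as:
//   Arg = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Arg);
// so the i8 value -1 reaches its register as the i32 value 0xFFFFFFFF.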
1542 switch (VA.getLocInfo()) {
1543 default: assert(0 && "Unknown loc info!");
1544 case CCValAssign::Full: break;
1545 case CCValAssign::SExt:
1546 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1547 break;
1548 case CCValAssign::ZExt:
1549 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1550 break;
1551 case CCValAssign::AExt:
1552 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1553 break;
1554 }
1555
1556 if (VA.isRegLoc()) {
1557 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1558 } else {
1559 if (!IsTailCall) {
1560 assert(VA.isMemLoc());
1561 if (StackPtr.Val == 0)
1562 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1563
1564 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1565 Arg));
1566 } else if (IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
1567 TailCallClobberedVRegs.push_back(std::make_pair(i, Arg));
1568 }
1569 }
1570 }
1571
1572 if (!MemOpChains.empty())
1573 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1574 &MemOpChains[0], MemOpChains.size());
1575
1576 // Build a sequence of copy-to-reg nodes chained together with token chain
1577 // and flag operands which copy the outgoing args into registers.
1578 SDOperand InFlag;
1579 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1580 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1581 InFlag);
1582 InFlag = Chain.getValue(1);
1583 }
1584
1585 // ELF / PIC requires the GOT pointer in the EBX register before function
1586 // calls made via the PLT.
1587 if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
1588 Chain = DAG.getCopyToReg(Chain, X86::EBX,
1589 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1590 InFlag);
1591 InFlag = Chain.getValue(1);
1592 }
1593 // If we are tail calling and generating PIC/GOT style code load the address
1594 // of the callee into ecx. The value in ecx is used as target of the tail
1595 // jump. This is done to circumvent the ebx/callee-saved problem for tail
1596 // calls on PIC/GOT architectures. Normally we would just put the address of
1597 // GOT into ebx and then call target@PLT. But for tail calls ebx would be
1598 // restored (since ebx is callee saved) before jumping to the target@PLT.
1599 if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
1600 // Note: The actual moving to ecx is done further down.
1601 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
1602 if (G && !G->getGlobal()->hasHiddenVisibility() &&
1603 !G->getGlobal()->hasProtectedVisibility())
1604 Callee = LowerGlobalAddress(Callee, DAG);
1605 else if (isa<ExternalSymbolSDNode>(Callee))
1606 Callee = LowerExternalSymbol(Callee, DAG);
1607 }
1608
1609 if (Is64Bit && isVarArg) {
1610 // From the AMD64 ABI document:
1611 // For calls that may call functions that use varargs or stdargs
1612 // (prototype-less calls or calls to functions containing ellipsis (...) in
1613 // the declaration) %al is used as a hidden argument to specify the number
1614 // of SSE registers used. The contents of %al do not need to match exactly
1615 // the number of registers, but must be an upper bound on the number of SSE
1616 // registers used and is in the range 0 - 8 inclusive.
1617
1618 // Count the number of XMM registers allocated.
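// Illustrative example (not from the original file): for a call such as
//   printf("%f %f\n", x, y);   // x and y passed in XMM0 and XMM1
// NumXMMRegs below would be 2, and the CopyToReg emits the equivalent of
// "movb $2, %al" immediately before the call instruction.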
1619 static const unsigned XMMArgRegs[] = {
1620 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1621 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1622 };
1623 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
1624
1625 Chain = DAG.getCopyToReg(Chain, X86::AL,
1626 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1627 InFlag = Chain.getValue(1);
1628 }
1629
1630
1631 // For tail calls lower the arguments to the 'real' stack slot.
1632 if (IsTailCall) {
1633 SmallVector<SDOperand, 8> MemOpChains2;
1634 SDOperand FIN;
1635 int FI = 0;
1636 // Do not flag preceding copytoreg stuff together with the following stuff.
1637 InFlag = SDOperand();
1638
1639 Chain = CopyTailCallClobberedArgumentsToVRegs(Chain, TailCallClobberedVRegs,
1640 DAG, MF, this);
1641
1642 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1643 CCValAssign &VA = ArgLocs[i];
1644 if (!VA.isRegLoc()) {
1645 assert(VA.isMemLoc());
1646 SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1647 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1648 ISD::ParamFlags::ParamFlagsTy Flags =
1649 cast<ConstantSDNode>(FlagsOp)->getValue();
1650 // Create frame index.
1651 int32_t Offset = VA.getLocMemOffset()+FPDiff;
1652 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
1653 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
1654 FIN = DAG.getFrameIndex(FI, MVT::i32);
1655
1656 // Find the virtual register for this argument.
1657 bool Found = false;
1658 for (unsigned idx = 0, e = TailCallClobberedVRegs.size(); idx < e; idx++)
1659 if (TailCallClobberedVRegs[idx].first == i) {
1660 Arg = TailCallClobberedVRegs[idx].second;
1661 Found = true;
1662 break;
1663 }
1664 assert(IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI) == false ||
1665 (Found == true && "No corresponding Argument was found"));
1666
1667 if (Flags & ISD::ParamFlags::ByVal) {
1668 // Copy relative to framepointer.
1669 MemOpChains2.push_back(CreateCopyOfByValArgument(Arg, FIN, Chain,
1670 Flags, DAG));
1671 } else {
1672 // Store relative to framepointer.
1673 MemOpChains2.push_back(
1674 DAG.getStore(Chain, Arg, FIN,
1675 PseudoSourceValue::getFixedStack(), FI));
1676 }
1677 }
1678 }
1679
1680 if (!MemOpChains2.empty())
1681 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1682 &MemOpChains2[0], MemOpChains2.size());
1683
1684 // Store the return address to the appropriate stack slot.
1685 if (FPDiff) {
1686 // Calculate the new stack slot for the return address.
1687 int SlotSize = Is64Bit ? 8 : 4;
1688 int NewReturnAddrFI =
1689 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
1690 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
1691 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1692 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
1693 PseudoSourceValue::getFixedStack(), NewReturnAddrFI);
1694 }
1695 }
1696
1697 // If the callee is a GlobalAddress node (quite common, every direct call is)
1698 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1699 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1700 // We should use extra load for direct calls to dllimported functions in
1701 // non-JIT mode.
1702 if ((IsTailCall || !Is64Bit ||
1703 getTargetMachine().getCodeModel() != CodeModel::Large)
1704 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1705 getTargetMachine(), true))
1706 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1707 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1708 if (IsTailCall || !Is64Bit ||
1709 getTargetMachine().getCodeModel() != CodeModel::Large)
1710 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1711 } else if (IsTailCall) {
1712 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX;
1713
1714 Chain = DAG.getCopyToReg(Chain,
1715 DAG.getRegister(Opc, getPointerTy()),
1716 Callee, InFlag);
1717 Callee = DAG.getRegister(Opc, getPointerTy());
1718 // Add register as live out.
1719 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
1720 }
1721
1722 // Returns a chain & a flag for retval copy to use.
1723 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1724 SmallVector<SDOperand, 8> Ops;
1725
1726 if (IsTailCall) {
1727 Ops.push_back(Chain);
1728 Ops.push_back(DAG.getIntPtrConstant(NumBytes));
1729 Ops.push_back(DAG.getIntPtrConstant(0));
1730 if (InFlag.Val)
1731 Ops.push_back(InFlag);
1732 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1733 InFlag = Chain.getValue(1);
1734
1735 // Returns a chain & a flag for retval copy to use.
1736 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1737 Ops.clear();
1738 }
1739
1740 Ops.push_back(Chain);
1741 Ops.push_back(Callee);
1742
1743 if (IsTailCall)
1744 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
1745
1746 // Add an implicit use GOT pointer in EBX.
1747 if (!IsTailCall && !Is64Bit &&
1748 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1749 Subtarget->isPICStyleGOT())
1750 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
1751
1752 // Add argument registers to the end of the list so that they are known live
1753 // into the call.
1754 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1755 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1756 RegsToPass[i].second.getValueType()));
1757
1758 if (InFlag.Val)
1759 Ops.push_back(InFlag);
1760
1761 if (IsTailCall) {
1762 assert(InFlag.Val &&
1763 "Flag must be set. Depend on flag being set in LowerRET");
1764 Chain = DAG.getNode(X86ISD::TAILCALL,
1765 Op.Val->getVTList(), &Ops[0], Ops.size());
1766
1767 return SDOperand(Chain.Val, Op.ResNo);
1768 }
1769
1770 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
1771 InFlag = Chain.getValue(1);
1772
1773 // Create the CALLSEQ_END node.
1774 unsigned NumBytesForCalleeToPush;
1775 if (IsCalleePop(Op))
1776 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
1777 else if (!Is64Bit && IsStructRet)
1778 // If this is a call to a struct-return function, the callee
1779 // pops the hidden struct pointer, so we have to push it back.
1780 // This is common for Darwin/X86, Linux & Mingw32 targets.
1781 NumBytesForCalleeToPush = 4;
1782 else
1783 NumBytesForCalleeToPush = 0; // Callee pops nothing.
1784
1785 // Returns a flag for retval copy to use.
1786 Chain = DAG.getCALLSEQ_END(Chain,
1787 DAG.getIntPtrConstant(NumBytes),
1788 DAG.getIntPtrConstant(NumBytesForCalleeToPush),
1789 InFlag);
1790 InFlag = Chain.getValue(1);
1791
1792 // Handle result values, copying them out of physregs into vregs that we
1793 // return.
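// A summary of the register pairs used below (a sketch inferred from
// ClassifyX86_64SRetCallReturn, not stated in the original): InGPR64 returns
// in RAX:RDX, InSSE in XMM0:XMM1, InX87 in a pair of x87 registers;
// InMemory takes the ordinary hidden-sret path via LowerCallResult.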
1794 switch (SRetMethod) {
1795 default:
1796 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1797 case X86::InGPR64:
1798 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1799 X86::RAX, X86::RDX,
1800 MVT::i64, DAG), Op.ResNo);
1801 case X86::InSSE:
1802 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1803 X86::XMM0, X86::XMM1,
1804 MVT::f64, DAG), Op.ResNo);
1805 case X86::InX87:
1806 return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
1807 Op.ResNo);
1808 }
1809}
1810
1811
1812 //===----------------------------------------------------------------------===//
1813 // Fast Calling Convention (tail call) implementation
1814 //===----------------------------------------------------------------------===//
1815
1816 // Like the stdcall convention, the callee cleans up the arguments, except
1817 // that ECX is reserved for storing the address of the tail-called function.
1818 // Only 2 registers are free for argument passing (inreg). Tail call
1819 // optimization is performed provided:
1820 // * tailcallopt is enabled
1821 // * caller/callee are fastcc
1822 // On X86_64 architecture with GOT-style position independent code only local
1823 // (within module) calls are supported at the moment.
1824 // To keep the stack aligned according to the platform ABI, the function
1825 // GetAlignedArgumentStackSize ensures that the argument delta is always a
1826 // multiple of the stack alignment. (Dynamic linkers need this - darwin's
1827 // dyld, for example.) If a tail-called callee has more arguments than the
1828 // caller, the caller needs to make sure that there is room to move the
1829 // RETADDR to. This is achieved by reserving an area the size of the argument
1830 // delta right after the original RETADDR, but before the saved framepointer
1831 // or the spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1832 // stack layout:
1833 // arg1
1834 // arg2
1835 // RETADDR
1836 // [ new RETADDR
1837 // move area ]
1838 // (possible EBP)
1839 // ESI
1840 // EDI
1841 // local1 ..
1842
1843 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
1844 /// 16-byte alignment requirement.
1845 unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1846 SelectionDAG& DAG) {
1847 if (PerformTailCallOpt) {
1848 MachineFunction &MF = DAG.getMachineFunction();
1849 const TargetMachine &TM = MF.getTarget();
1850 const TargetFrameInfo &TFI = *TM.getFrameInfo();
1851 unsigned StackAlignment = TFI.getStackAlignment();
1852 uint64_t AlignMask = StackAlignment - 1;
1853 int64_t Offset = StackSize;
1854 unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1855 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1856 // Remainder is at most StackAlignment - SlotSize; just add the difference.
1857 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1858 } else {
1859 // Mask out the lower bits, then add StackAlignment plus (StackAlignment - SlotSize).
1860 Offset = ((~AlignMask) & Offset) + StackAlignment +
1861 (StackAlignment-SlotSize);
1862 }
1863 StackSize = Offset;
1864 }
1865 return StackSize;
1866}
1867
1868 /// IsEligibleForTailCallOptimization - Check to see whether the next
1869 /// instruction following the call is a return. A function is eligible if
1870 /// caller/callee calling conventions match, currently only fastcc supports
1871 /// tail calls, and the function CALL is immediately followed by a RET.
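/// For instance (an illustrative IR sketch, not from this file), the pair
///   %r = tail call fastcc i32 @callee(i32 %x)
///   ret i32 %r
/// is eligible under -tailcallopt when the caller is also fastcc, while any
/// instruction between the call and the ret disqualifies the call.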
1872 bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1873 SDOperand Ret,
1874 SelectionDAG& DAG) const {
1875 if (!PerformTailCallOpt)
1876 return false;
1877
1878 // Check whether the CALL node immediately precedes the RET node and whether
1879 // the return uses the result of the node or is a void return.
1880 unsigned NumOps = Ret.getNumOperands();
1881 if ((NumOps == 1 &&
1882 (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1883 Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1884 (NumOps > 1 &&
1885 Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1886 Ret.getOperand(1) == SDOperand(Call.Val,0))) {
1887 MachineFunction &MF = DAG.getMachineFunction();
1888 unsigned CallerCC = MF.getFunction()->getCallingConv();
1889 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1890 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1891 SDOperand Callee = Call.getOperand(4);
1892 // On x86/32-bit, PIC/GOT tail calls are supported.
1893 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
1894 !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
1895 return true;
1896
1897 // Can only do local tail calls (in same module, hidden or protected) on
1898 // x86_64 PIC/GOT at the moment.
1899 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1900 return G->getGlobal()->hasHiddenVisibility()
1901 || G->getGlobal()->hasProtectedVisibility();
1902 }
1903 }
1904
1905 return false;
1906}
1907
1908 //===----------------------------------------------------------------------===//
1909 // Other Lowering Hooks
1910 //===----------------------------------------------------------------------===//
1911
1912
1913 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1914 MachineFunction &MF = DAG.getMachineFunction();
1915 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1916 int ReturnAddrIndex = FuncInfo->getRAIndex();
1917
1918 if (ReturnAddrIndex == 0) {
1919 // Set up a frame object for the return address.
1920 if (Subtarget->is64Bit())
1921 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
1922 else
1923 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1924
1925 FuncInfo->setRAIndex(ReturnAddrIndex);
1926 }
1927
1928 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
1929}
1930
1931
1932
1933 /// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
1934 /// X86-specific condition code. It returns false if it cannot do a direct
1935 /// translation. X86CC is the translated CondCode. LHS/RHS are modified as
1936 /// needed.
1937 static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
1938 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
1939 SelectionDAG &DAG) {
1940 X86CC = X86::COND_INVALID;
1941 if (!isFP) {
1942 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1943 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1944 // X > -1 -> X == 0, jump !sign.
1945 RHS = DAG.getConstant(0, RHS.getValueType());
1946 X86CC = X86::COND_NS;
1947 return true;
1948 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1949 // X < 0 -> X == 0, jump on sign.
1950 X86CC = X86::COND_S;
1951 return true;
1952 } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1953 // X < 1 -> X <= 0
1954 RHS = DAG.getConstant(0, RHS.getValueType());
1955 X86CC = X86::COND_LE;
1956 return true;
1957 }
1958 }
1959
1960 switch (SetCCOpcode) {
1961 default: break;
1962 case ISD::SETEQ: X86CC = X86::COND_E; break;
1963 case ISD::SETGT: X86CC = X86::COND_G; break;
1964 case ISD::SETGE: X86CC = X86::COND_GE; break;
1965 case ISD::SETLT: X86CC = X86::COND_L; break;
1966 case ISD::SETLE: X86CC = X86::COND_LE; break;
1967 case ISD::SETNE: X86CC = X86::COND_NE; break;
1968 case ISD::SETULT: X86CC = X86::COND_B; break;
1969 case ISD::SETUGT: X86CC = X86::COND_A; break;
1970 case ISD::SETULE: X86CC = X86::COND_BE; break;
1971 case ISD::SETUGE: X86CC = X86::COND_AE; break;
1972 }
1973 } else {
1974 // On a floating point condition, the flags are set as follows:
1975 // ZF PF CF op
1976 // 0 | 0 | 0 | X > Y
1977 // 0 | 0 | 1 | X < Y
1978 // 1 | 0 | 0 | X == Y
1979 // 1 | 1 | 1 | unordered
1980 bool Flip = false;
1981 switch (SetCCOpcode) {
1982 default: break;
1983 case ISD::SETUEQ:
1984 case ISD::SETEQ: X86CC = X86::COND_E; break;
1985 case ISD::SETOLT: Flip = true; // Fallthrough
1986 case ISD::SETOGT:
1987 case ISD::SETGT: X86CC = X86::COND_A; break;
1988 case ISD::SETOLE: Flip = true; // Fallthrough
1989 case ISD::SETOGE:
1990 case ISD::SETGE: X86CC = X86::COND_AE; break;
1991 case ISD::SETUGT: Flip = true; // Fallthrough
1992 case ISD::SETULT:
1993 case ISD::SETLT: X86CC = X86::COND_B; break;
1994 case ISD::SETUGE: Flip = true; // Fallthrough
1995 case ISD::SETULE:
1996 case ISD::SETLE: X86CC = X86::COND_BE; break;
1997 case ISD::SETONE:
1998 case ISD::SETNE: X86CC = X86::COND_NE; break;
1999 case ISD::SETUO: X86CC = X86::COND_P; break;
2000 case ISD::SETO: X86CC = X86::COND_NP; break;
2001 }
2002 if (Flip)
2003 std::swap(LHS, RHS);
2004 }
2005
2006 return X86CC != X86::COND_INVALID;
2007}
2008
2009 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
2010 /// code. The current x86 ISA includes the following FP cmov instructions:
2011 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2012 static bool hasFPCMov(unsigned X86CC) {
2013 switch (X86CC) {
2014 default:
2015 return false;
2016 case X86::COND_B:
2017 case X86::COND_BE:
2018 case X86::COND_E:
2019 case X86::COND_P:
2020 case X86::COND_A:
2021 case X86::COND_AE:
2022 case X86::COND_NE:
2023 case X86::COND_NP:
2024 return true;
2025 }
2026}
2027
2028 /// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
2029 /// true if Op is undef or if its value falls within the half-open range [Low, Hi).
2030 static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
2031 if (Op.getOpcode() == ISD::UNDEF)
2032 return true;
2033
2034 unsigned Val = cast<ConstantSDNode>(Op)->getValue();
2035 return (Val >= Low && Val < Hi);
2036}
2037
2038 /// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
2039 /// true if Op is undef or if its value equals the specified value.
2040 static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
2041 if (Op.getOpcode() == ISD::UNDEF)
2042 return true;
2043 return cast<ConstantSDNode>(Op)->getValue() == Val;
2044}
2045
2046 /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
2047 /// specifies a shuffle of elements that is suitable for input to PSHUFD.
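/// For example, the 4-element mask <3, 1, 0, 2> qualifies (every index
/// selects from the first vector), while <4, 1, 0, 2> does not, since
/// index 4 would reference the second vector.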
2048bool X86::isPSHUFDMask(SDNode *N) { 2049 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2050 2051 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 2052 return false; 2053 2054 // Check if the value doesn't reference the second vector. 2055 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2056 SDOperand Arg = N->getOperand(i); 2057 if (Arg.getOpcode() == ISD::UNDEF) continue; 2058 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2059 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 2060 return false; 2061 } 2062 2063 return true; 2064} 2065 2066/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 2067/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 2068bool X86::isPSHUFHWMask(SDNode *N) { 2069 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2070 2071 if (N->getNumOperands() != 8) 2072 return false; 2073 2074 // Lower quadword copied in order. 2075 for (unsigned i = 0; i != 4; ++i) { 2076 SDOperand Arg = N->getOperand(i); 2077 if (Arg.getOpcode() == ISD::UNDEF) continue; 2078 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2079 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2080 return false; 2081 } 2082 2083 // Upper quadword shuffled. 2084 for (unsigned i = 4; i != 8; ++i) { 2085 SDOperand Arg = N->getOperand(i); 2086 if (Arg.getOpcode() == ISD::UNDEF) continue; 2087 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2088 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2089 if (Val < 4 || Val > 7) 2090 return false; 2091 } 2092 2093 return true; 2094} 2095 2096/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2097/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2098bool X86::isPSHUFLWMask(SDNode *N) { 2099 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2100 2101 if (N->getNumOperands() != 8) 2102 return false; 2103 2104 // Upper quadword copied in order. 2105 for (unsigned i = 4; i != 8; ++i) 2106 if (!isUndefOrEqual(N->getOperand(i), i)) 2107 return false; 2108 2109 // Lower quadword shuffled. 2110 for (unsigned i = 0; i != 4; ++i) 2111 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2112 return false; 2113 2114 return true; 2115} 2116 2117/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2118/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2119static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2120 if (NumElems != 2 && NumElems != 4) return false; 2121 2122 unsigned Half = NumElems / 2; 2123 for (unsigned i = 0; i < Half; ++i) 2124 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2125 return false; 2126 for (unsigned i = Half; i < NumElems; ++i) 2127 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2128 return false; 2129 2130 return true; 2131} 2132 2133bool X86::isSHUFPMask(SDNode *N) { 2134 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2135 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2136} 2137 2138/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2139/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2140/// half elements to come from vector 1 (which would equal the dest.) and 2141/// the upper half to come from vector 2. 
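/// e.g. for 4 elements the mask <4, 5, 2, 3> is commuted: swapping the
/// operands turns it into <0, 1, 6, 7>, which isSHUFPMask accepts.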
2142static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2143 if (NumOps != 2 && NumOps != 4) return false; 2144 2145 unsigned Half = NumOps / 2; 2146 for (unsigned i = 0; i < Half; ++i) 2147 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2148 return false; 2149 for (unsigned i = Half; i < NumOps; ++i) 2150 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2151 return false; 2152 return true; 2153} 2154 2155static bool isCommutedSHUFP(SDNode *N) { 2156 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2157 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2158} 2159 2160/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2161/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2162bool X86::isMOVHLPSMask(SDNode *N) { 2163 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2164 2165 if (N->getNumOperands() != 4) 2166 return false; 2167 2168 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2169 return isUndefOrEqual(N->getOperand(0), 6) && 2170 isUndefOrEqual(N->getOperand(1), 7) && 2171 isUndefOrEqual(N->getOperand(2), 2) && 2172 isUndefOrEqual(N->getOperand(3), 3); 2173} 2174 2175/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2176/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2177/// <2, 3, 2, 3> 2178bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2179 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2180 2181 if (N->getNumOperands() != 4) 2182 return false; 2183 2184 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2185 return isUndefOrEqual(N->getOperand(0), 2) && 2186 isUndefOrEqual(N->getOperand(1), 3) && 2187 isUndefOrEqual(N->getOperand(2), 2) && 2188 isUndefOrEqual(N->getOperand(3), 3); 2189} 2190 2191/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2192/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2193bool X86::isMOVLPMask(SDNode *N) { 2194 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2195 2196 unsigned NumElems = N->getNumOperands(); 2197 if (NumElems != 2 && NumElems != 4) 2198 return false; 2199 2200 for (unsigned i = 0; i < NumElems/2; ++i) 2201 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2202 return false; 2203 2204 for (unsigned i = NumElems/2; i < NumElems; ++i) 2205 if (!isUndefOrEqual(N->getOperand(i), i)) 2206 return false; 2207 2208 return true; 2209} 2210 2211/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2212/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2213/// and MOVLHPS. 2214bool X86::isMOVHPMask(SDNode *N) { 2215 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2216 2217 unsigned NumElems = N->getNumOperands(); 2218 if (NumElems != 2 && NumElems != 4) 2219 return false; 2220 2221 for (unsigned i = 0; i < NumElems/2; ++i) 2222 if (!isUndefOrEqual(N->getOperand(i), i)) 2223 return false; 2224 2225 for (unsigned i = 0; i < NumElems/2; ++i) { 2226 SDOperand Arg = N->getOperand(i + NumElems/2); 2227 if (!isUndefOrEqual(Arg, i + NumElems)) 2228 return false; 2229 } 2230 2231 return true; 2232} 2233 2234/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2235/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
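/// e.g. the canonical 4-element unpackl mask is <0, 4, 1, 5>, interleaving
/// the low halves of the two vectors.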
2236bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2237 bool V2IsSplat = false) { 2238 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2239 return false; 2240 2241 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2242 SDOperand BitI = Elts[i]; 2243 SDOperand BitI1 = Elts[i+1]; 2244 if (!isUndefOrEqual(BitI, j)) 2245 return false; 2246 if (V2IsSplat) { 2247 if (isUndefOrEqual(BitI1, NumElts)) 2248 return false; 2249 } else { 2250 if (!isUndefOrEqual(BitI1, j + NumElts)) 2251 return false; 2252 } 2253 } 2254 2255 return true; 2256} 2257 2258bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2259 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2260 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2261} 2262 2263/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2264/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2265bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2266 bool V2IsSplat = false) { 2267 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2268 return false; 2269 2270 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2271 SDOperand BitI = Elts[i]; 2272 SDOperand BitI1 = Elts[i+1]; 2273 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2274 return false; 2275 if (V2IsSplat) { 2276 if (isUndefOrEqual(BitI1, NumElts)) 2277 return false; 2278 } else { 2279 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2280 return false; 2281 } 2282 } 2283 2284 return true; 2285} 2286 2287bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2288 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2289 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2290} 2291 2292/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2293/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2294/// <0, 0, 1, 1> 2295bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2296 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2297 2298 unsigned NumElems = N->getNumOperands(); 2299 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2300 return false; 2301 2302 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2303 SDOperand BitI = N->getOperand(i); 2304 SDOperand BitI1 = N->getOperand(i+1); 2305 2306 if (!isUndefOrEqual(BitI, j)) 2307 return false; 2308 if (!isUndefOrEqual(BitI1, j)) 2309 return false; 2310 } 2311 2312 return true; 2313} 2314 2315/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2316/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2317/// <2, 2, 3, 3> 2318bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2319 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2320 2321 unsigned NumElems = N->getNumOperands(); 2322 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2323 return false; 2324 2325 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2326 SDOperand BitI = N->getOperand(i); 2327 SDOperand BitI1 = N->getOperand(i + 1); 2328 2329 if (!isUndefOrEqual(BitI, j)) 2330 return false; 2331 if (!isUndefOrEqual(BitI1, j)) 2332 return false; 2333 } 2334 2335 return true; 2336} 2337 2338/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2339/// specifies a shuffle of elements that is suitable for input to MOVSS, 2340/// MOVSD, and MOVD, i.e. setting the lowest element. 
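/// e.g. <4, 1, 2, 3> for a 4-element shuffle: element 0 comes from V2 and
/// the remaining elements keep their positions in V1.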
2341 static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
2342 if (NumElts != 2 && NumElts != 4)
2343 return false;
2344
2345 if (!isUndefOrEqual(Elts[0], NumElts))
2346 return false;
2347
2348 for (unsigned i = 1; i < NumElts; ++i) {
2349 if (!isUndefOrEqual(Elts[i], i))
2350 return false;
2351 }
2352
2353 return true;
2354}
2355
2356 bool X86::isMOVLMask(SDNode *N) {
2357 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2358 return ::isMOVLMask(N->op_begin(), N->getNumOperands());
2359}
2360
2361 /// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
2362 /// of what x86 movss wants: the lowest element must be the lowest element
2363 /// of vector 2, and the other elements must come from vector 1 in order.
2364 static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
2365 bool V2IsSplat = false,
2366 bool V2IsUndef = false) {
2367 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
2368 return false;
2369
2370 if (!isUndefOrEqual(Ops[0], 0))
2371 return false;
2372
2373 for (unsigned i = 1; i < NumOps; ++i) {
2374 SDOperand Arg = Ops[i];
2375 if (!(isUndefOrEqual(Arg, i+NumOps) ||
2376 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
2377 (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
2378 return false;
2379 }
2380
2381 return true;
2382}
2383
2384 static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2385 bool V2IsUndef = false) {
2386 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2387 return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
2388 V2IsSplat, V2IsUndef);
2389}
2390
2391 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2392 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2393 bool X86::isMOVSHDUPMask(SDNode *N) {
2394 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2395
2396 if (N->getNumOperands() != 4)
2397 return false;
2398
2399 // Expect 1, 1, 3, 3
2400 for (unsigned i = 0; i < 2; ++i) {
2401 SDOperand Arg = N->getOperand(i);
2402 if (Arg.getOpcode() == ISD::UNDEF) continue;
2403 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2404 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2405 if (Val != 1) return false;
2406 }
2407
2408 bool HasHi = false;
2409 for (unsigned i = 2; i < 4; ++i) {
2410 SDOperand Arg = N->getOperand(i);
2411 if (Arg.getOpcode() == ISD::UNDEF) continue;
2412 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2413 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2414 if (Val != 3) return false;
2415 HasHi = true;
2416 }
2417
2418 // Don't use movshdup if it can be done with a shufps.
2419 return HasHi;
2420}
2421
2422 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2423 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
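/// e.g. the only accepted 4-element form (modulo undefs) is <0, 0, 2, 2>,
/// duplicating the even elements.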
2424 bool X86::isMOVSLDUPMask(SDNode *N) {
2425 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2426
2427 if (N->getNumOperands() != 4)
2428 return false;
2429
2430 // Expect 0, 0, 2, 2
2431 for (unsigned i = 0; i < 2; ++i) {
2432 SDOperand Arg = N->getOperand(i);
2433 if (Arg.getOpcode() == ISD::UNDEF) continue;
2434 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2435 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2436 if (Val != 0) return false;
2437 }
2438
2439 bool HasHi = false;
2440 for (unsigned i = 2; i < 4; ++i) {
2441 SDOperand Arg = N->getOperand(i);
2442 if (Arg.getOpcode() == ISD::UNDEF) continue;
2443 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2444 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2445 if (Val != 2) return false;
2446 HasHi = true;
2447 }
2448
2449 // Don't use movsldup if it can be done with a shufps.
2450 return HasHi;
2451}
2452
2453 /// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2454 /// specifies an identity operation on the LHS or RHS.
2455 static bool isIdentityMask(SDNode *N, bool RHS = false) {
2456 unsigned NumElems = N->getNumOperands();
2457 for (unsigned i = 0; i < NumElems; ++i)
2458 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2459 return false;
2460 return true;
2461}
2462
2463 /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2464 /// a splat of a single element.
2465 static bool isSplatMask(SDNode *N) {
2466 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2467
2468 // This is a splat operation if each element of the permute is the same, and
2469 // if the value doesn't reference the second vector.
2470 unsigned NumElems = N->getNumOperands();
2471 SDOperand ElementBase;
2472 unsigned i = 0;
2473 for (; i != NumElems; ++i) {
2474 SDOperand Elt = N->getOperand(i);
2475 if (isa<ConstantSDNode>(Elt)) {
2476 ElementBase = Elt;
2477 break;
2478 }
2479 }
2480
2481 if (!ElementBase.Val)
2482 return false;
2483
2484 for (; i != NumElems; ++i) {
2485 SDOperand Arg = N->getOperand(i);
2486 if (Arg.getOpcode() == ISD::UNDEF) continue;
2487 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2488 if (Arg != ElementBase) return false;
2489 }
2490
2491 // Make sure it is a splat of the first vector operand.
2492 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2493}
2494
2495 /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2496 /// a splat of a single element and it's a 2 or 4 element mask.
2497 bool X86::isSplatMask(SDNode *N) {
2498 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2499
2500 // We can only splat 64-bit and 32-bit quantities with a single instruction.
2501 if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2502 return false;
2503 return ::isSplatMask(N);
2504}
2505
2506 /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2507 /// specifies a splat of element zero.
2508 bool X86::isSplatLoMask(SDNode *N) {
2509 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2510
2511 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2512 if (!isUndefOrEqual(N->getOperand(i), 0))
2513 return false;
2514 return true;
2515}
2516
2517 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2518 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2519 /// instructions.
2520 unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2521 unsigned NumOperands = N->getNumOperands();
2522 unsigned Shift = (NumOperands == 4) ?
2 : 1;
2523 unsigned Mask = 0;
2524 for (unsigned i = 0; i < NumOperands; ++i) {
2525 unsigned Val = 0;
2526 SDOperand Arg = N->getOperand(NumOperands-i-1);
2527 if (Arg.getOpcode() != ISD::UNDEF)
2528 Val = cast<ConstantSDNode>(Arg)->getValue();
2529 if (Val >= NumOperands) Val -= NumOperands;
2530 Mask |= Val;
2531 if (i != NumOperands - 1)
2532 Mask <<= Shift;
2533 }
2534
2535 return Mask;
2536}
2537
2538 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2539 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2540 /// instructions.
2541 unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2542 unsigned Mask = 0;
2543 // 8 nodes, but we only care about the last 4.
2544 for (unsigned i = 7; i >= 4; --i) {
2545 unsigned Val = 0;
2546 SDOperand Arg = N->getOperand(i);
2547 if (Arg.getOpcode() != ISD::UNDEF)
2548 Val = cast<ConstantSDNode>(Arg)->getValue();
2549 Mask |= (Val - 4);
2550 if (i != 4)
2551 Mask <<= 2;
2552 }
2553
2554 return Mask;
2555}
2556
2557 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2558 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2559 /// instructions.
2560 unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2561 unsigned Mask = 0;
2562 // 8 nodes, but we only care about the first 4.
2563 for (int i = 3; i >= 0; --i) {
2564 unsigned Val = 0;
2565 SDOperand Arg = N->getOperand(i);
2566 if (Arg.getOpcode() != ISD::UNDEF)
2567 Val = cast<ConstantSDNode>(Arg)->getValue();
2568 Mask |= Val;
2569 if (i != 0)
2570 Mask <<= 2;
2571 }
2572
2573 return Mask;
2574}
2575
2576 /// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2577 /// specifies an 8-element shuffle that can be broken into a pair of
2578 /// PSHUFHW and PSHUFLW.
2579 static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2580 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2581
2582 if (N->getNumOperands() != 8)
2583 return false;
2584
2585 // Lower quadword shuffled.
2586 for (unsigned i = 0; i != 4; ++i) {
2587 SDOperand Arg = N->getOperand(i);
2588 if (Arg.getOpcode() == ISD::UNDEF) continue;
2589 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2590 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2591 if (Val >= 4)
2592 return false;
2593 }
2594
2595 // Upper quadword shuffled.
2596 for (unsigned i = 4; i != 8; ++i) {
2597 SDOperand Arg = N->getOperand(i);
2598 if (Arg.getOpcode() == ISD::UNDEF) continue;
2599 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2600 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2601 if (Val < 4 || Val > 7)
2602 return false;
2603 }
2604
2605 return true;
2606}
2607
2608 /// CommuteVectorShuffle - Swap vector_shuffle operands as well as
2609 /// values in the permute mask.
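/// e.g. (illustrative) shuffle(V1, V2, <0, 5, 2, 7>) becomes
/// shuffle(V2, V1, <4, 1, 6, 3>): indices below NumElems gain NumElems and
/// the rest lose it.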
2610static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2611 SDOperand &V2, SDOperand &Mask, 2612 SelectionDAG &DAG) { 2613 MVT::ValueType VT = Op.getValueType(); 2614 MVT::ValueType MaskVT = Mask.getValueType(); 2615 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2616 unsigned NumElems = Mask.getNumOperands(); 2617 SmallVector<SDOperand, 8> MaskVec; 2618 2619 for (unsigned i = 0; i != NumElems; ++i) { 2620 SDOperand Arg = Mask.getOperand(i); 2621 if (Arg.getOpcode() == ISD::UNDEF) { 2622 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2623 continue; 2624 } 2625 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2626 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2627 if (Val < NumElems) 2628 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2629 else 2630 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2631 } 2632 2633 std::swap(V1, V2); 2634 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2635 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2636} 2637 2638/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2639/// the two vector operands have swapped position. 2640static 2641SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2642 MVT::ValueType MaskVT = Mask.getValueType(); 2643 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2644 unsigned NumElems = Mask.getNumOperands(); 2645 SmallVector<SDOperand, 8> MaskVec; 2646 for (unsigned i = 0; i != NumElems; ++i) { 2647 SDOperand Arg = Mask.getOperand(i); 2648 if (Arg.getOpcode() == ISD::UNDEF) { 2649 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2650 continue; 2651 } 2652 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2653 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2654 if (Val < NumElems) 2655 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2656 else 2657 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2658 } 2659 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2660} 2661 2662 2663/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2664/// match movhlps. The lower half elements should come from upper half of 2665/// V1 (and in order), and the upper half elements should come from the upper 2666/// half of V2 (and in order). 2667static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2668 unsigned NumElems = Mask->getNumOperands(); 2669 if (NumElems != 4) 2670 return false; 2671 for (unsigned i = 0, e = 2; i != e; ++i) 2672 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2673 return false; 2674 for (unsigned i = 2; i != 4; ++i) 2675 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2676 return false; 2677 return true; 2678} 2679 2680/// isScalarLoadToVector - Returns true if the node is a scalar load that 2681/// is promoted to a vector. 2682static inline bool isScalarLoadToVector(SDNode *N) { 2683 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2684 N = N->getOperand(0).Val; 2685 return ISD::isNON_EXTLoad(N); 2686 } 2687 return false; 2688} 2689 2690/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2691/// match movlp{s|d}. The lower half elements should come from lower half of 2692/// V1 (and in order), and the upper half elements should come from the upper 2693/// half of V2 (and in order). And since V1 will become the source of the 2694/// MOVLP, it must be either a vector load or a scalar load to vector. 
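/// e.g. a 4-element mask <0, 1, 6, 7> qualifies when V1 is a (scalar or
/// vector) load and V2 is not.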
2695 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2696 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2697 return false;
2698 // If V2 is a vector load, don't do this transformation. We will try to use
2699 // a load-folding shufps instead.
2700 if (ISD::isNON_EXTLoad(V2))
2701 return false;
2702
2703 unsigned NumElems = Mask->getNumOperands();
2704 if (NumElems != 2 && NumElems != 4)
2705 return false;
2706 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2707 if (!isUndefOrEqual(Mask->getOperand(i), i))
2708 return false;
2709 for (unsigned i = NumElems/2; i != NumElems; ++i)
2710 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2711 return false;
2712 return true;
2713}
2714
2715 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2716 /// all the same.
2717 static bool isSplatVector(SDNode *N) {
2718 if (N->getOpcode() != ISD::BUILD_VECTOR)
2719 return false;
2720
2721 SDOperand SplatValue = N->getOperand(0);
2722 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2723 if (N->getOperand(i) != SplatValue)
2724 return false;
2725 return true;
2726}
2727
2728 /// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2729 /// to an undef.
2730 static bool isUndefShuffle(SDNode *N) {
2731 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2732 return false;
2733
2734 SDOperand V1 = N->getOperand(0);
2735 SDOperand V2 = N->getOperand(1);
2736 SDOperand Mask = N->getOperand(2);
2737 unsigned NumElems = Mask.getNumOperands();
2738 for (unsigned i = 0; i != NumElems; ++i) {
2739 SDOperand Arg = Mask.getOperand(i);
2740 if (Arg.getOpcode() != ISD::UNDEF) {
2741 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2742 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2743 return false;
2744 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2745 return false;
2746 }
2747 }
2748 return true;
2749}
2750
2751 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
2752 /// constant +0.0.
2753 static inline bool isZeroNode(SDOperand Elt) {
2754 return ((isa<ConstantSDNode>(Elt) &&
2755 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2756 (isa<ConstantFPSDNode>(Elt) &&
2757 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
2758}
2759
2760 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2761 /// to a zero vector.
2762 static bool isZeroShuffle(SDNode *N) {
2763 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2764 return false;
2765
2766 SDOperand V1 = N->getOperand(0);
2767 SDOperand V2 = N->getOperand(1);
2768 SDOperand Mask = N->getOperand(2);
2769 unsigned NumElems = Mask.getNumOperands();
2770 for (unsigned i = 0; i != NumElems; ++i) {
2771 SDOperand Arg = Mask.getOperand(i);
2772 if (Arg.getOpcode() == ISD::UNDEF)
2773 continue;
2774
2775 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
2776 if (Idx < NumElems) {
2777 unsigned Opc = V1.Val->getOpcode();
2778 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
2779 continue;
2780 if (Opc != ISD::BUILD_VECTOR ||
2781 !isZeroNode(V1.Val->getOperand(Idx)))
2782 return false;
2783 } else if (Idx >= NumElems) {
2784 unsigned Opc = V2.Val->getOpcode();
2785 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
2786 continue;
2787 if (Opc != ISD::BUILD_VECTOR ||
2788 !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
2789 return false;
2790 }
2791 }
2792 return true;
2793}
2794
2795 /// getZeroVector - Returns a vector of specified type with all zero elements.
2796/// 2797static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2798 assert(MVT::isVector(VT) && "Expected a vector type"); 2799 2800 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2801 // type. This ensures they get CSE'd. 2802 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2803 SDOperand Vec; 2804 if (MVT::getSizeInBits(VT) == 64) // MMX 2805 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2806 else // SSE 2807 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2808 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2809} 2810 2811/// getOnesVector - Returns a vector of specified type with all bits set. 2812/// 2813static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2814 assert(MVT::isVector(VT) && "Expected a vector type"); 2815 2816 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2817 // type. This ensures they get CSE'd. 2818 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2819 SDOperand Vec; 2820 if (MVT::getSizeInBits(VT) == 64) // MMX 2821 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2822 else // SSE 2823 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2824 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2825} 2826 2827 2828/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2829/// that point to V2 points to its first element. 2830static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2831 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2832 2833 bool Changed = false; 2834 SmallVector<SDOperand, 8> MaskVec; 2835 unsigned NumElems = Mask.getNumOperands(); 2836 for (unsigned i = 0; i != NumElems; ++i) { 2837 SDOperand Arg = Mask.getOperand(i); 2838 if (Arg.getOpcode() != ISD::UNDEF) { 2839 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2840 if (Val > NumElems) { 2841 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2842 Changed = true; 2843 } 2844 } 2845 MaskVec.push_back(Arg); 2846 } 2847 2848 if (Changed) 2849 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2850 &MaskVec[0], MaskVec.size()); 2851 return Mask; 2852} 2853 2854/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2855/// operation of specified width. 2856static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2857 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2858 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2859 2860 SmallVector<SDOperand, 8> MaskVec; 2861 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2862 for (unsigned i = 1; i != NumElems; ++i) 2863 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2864 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2865} 2866 2867/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2868/// of specified width. 
2869static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2870 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2871 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2872 SmallVector<SDOperand, 8> MaskVec; 2873 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2874 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2875 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2876 } 2877 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2878} 2879 2880/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2881/// of specified width. 2882static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2883 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2884 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2885 unsigned Half = NumElems/2; 2886 SmallVector<SDOperand, 8> MaskVec; 2887 for (unsigned i = 0; i != Half; ++i) { 2888 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2889 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2890 } 2891 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2892} 2893 2894/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps 2895/// element #0 of a vector with the specified index, leaving the rest of the 2896/// elements in place. 2897static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, 2898 SelectionDAG &DAG) { 2899 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2900 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2901 SmallVector<SDOperand, 8> MaskVec; 2902 // Element #0 of the result gets the elt we are replacing. 2903 MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); 2904 for (unsigned i = 1; i != NumElems; ++i) 2905 MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); 2906 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2907} 2908 2909/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 2910/// 2911static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2912 SDOperand V1 = Op.getOperand(0); 2913 SDOperand Mask = Op.getOperand(2); 2914 MVT::ValueType VT = Op.getValueType(); 2915 unsigned NumElems = Mask.getNumOperands(); 2916 Mask = getUnpacklMask(NumElems, DAG); 2917 while (NumElems != 4) { 2918 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2919 NumElems >>= 1; 2920 } 2921 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2922 2923 Mask = getZeroVector(MVT::v4i32, DAG); 2924 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2925 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2926 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2927} 2928 2929/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2930/// vector of zero or undef vector. This produces a shuffle where the low 2931/// element of V2 is swizzled into the zero/undef vector, landing at element 2932/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 2933static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, 2934 bool isZero, SelectionDAG &DAG) { 2935 MVT::ValueType VT = V2.getValueType(); 2936 SDOperand V1 = isZero ? 
getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2937 unsigned NumElems = MVT::getVectorNumElements(V2.getValueType()); 2938 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2939 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2940 SmallVector<SDOperand, 16> MaskVec; 2941 for (unsigned i = 0; i != NumElems; ++i) 2942 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 2943 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2944 else 2945 MaskVec.push_back(DAG.getConstant(i, EVT)); 2946 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2947 &MaskVec[0], MaskVec.size()); 2948 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2949} 2950 2951/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2952/// 2953static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2954 unsigned NumNonZero, unsigned NumZero, 2955 SelectionDAG &DAG, TargetLowering &TLI) { 2956 if (NumNonZero > 8) 2957 return SDOperand(); 2958 2959 SDOperand V(0, 0); 2960 bool First = true; 2961 for (unsigned i = 0; i < 16; ++i) { 2962 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2963 if (ThisIsNonZero && First) { 2964 if (NumZero) 2965 V = getZeroVector(MVT::v8i16, DAG); 2966 else 2967 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2968 First = false; 2969 } 2970 2971 if ((i & 1) != 0) { 2972 SDOperand ThisElt(0, 0), LastElt(0, 0); 2973 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2974 if (LastIsNonZero) { 2975 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2976 } 2977 if (ThisIsNonZero) { 2978 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2979 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2980 ThisElt, DAG.getConstant(8, MVT::i8)); 2981 if (LastIsNonZero) 2982 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2983 } else 2984 ThisElt = LastElt; 2985 2986 if (ThisElt.Val) 2987 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2988 DAG.getIntPtrConstant(i/2)); 2989 } 2990 } 2991 2992 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2993} 2994 2995/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2996/// 2997static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2998 unsigned NumNonZero, unsigned NumZero, 2999 SelectionDAG &DAG, TargetLowering &TLI) { 3000 if (NumNonZero > 4) 3001 return SDOperand(); 3002 3003 SDOperand V(0, 0); 3004 bool First = true; 3005 for (unsigned i = 0; i < 8; ++i) { 3006 bool isNonZero = (NonZeros & (1 << i)) != 0; 3007 if (isNonZero) { 3008 if (First) { 3009 if (NumZero) 3010 V = getZeroVector(MVT::v8i16, DAG); 3011 else 3012 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3013 First = false; 3014 } 3015 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 3016 DAG.getIntPtrConstant(i)); 3017 } 3018 } 3019 3020 return V; 3021} 3022 3023SDOperand 3024X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3025 // All zeros are handled with pxor, all ones are handled with pcmpeqd. 3026 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 3027 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 3028 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 3029 // eliminated on x86-32 hosts.
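    // For example, an all-zero v2i64 node is rebuilt below as (v2i64 (bit_convert (v4i32 (build_vector 0,0,0,0)))), while v4i32 and v2i32 nodes are already in the canonical form and are returned unchanged.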
3030 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 3031 return Op; 3032 3033 if (ISD::isBuildVectorAllOnes(Op.Val)) 3034 return getOnesVector(Op.getValueType(), DAG); 3035 return getZeroVector(Op.getValueType(), DAG); 3036 } 3037 3038 MVT::ValueType VT = Op.getValueType(); 3039 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3040 unsigned EVTBits = MVT::getSizeInBits(EVT); 3041 3042 unsigned NumElems = Op.getNumOperands(); 3043 unsigned NumZero = 0; 3044 unsigned NumNonZero = 0; 3045 unsigned NonZeros = 0; 3046 bool IsAllConstants = true; 3047 SmallSet<SDOperand, 8> Values; 3048 for (unsigned i = 0; i < NumElems; ++i) { 3049 SDOperand Elt = Op.getOperand(i); 3050 if (Elt.getOpcode() == ISD::UNDEF) 3051 continue; 3052 Values.insert(Elt); 3053 if (Elt.getOpcode() != ISD::Constant && 3054 Elt.getOpcode() != ISD::ConstantFP) 3055 IsAllConstants = false; 3056 if (isZeroNode(Elt)) 3057 NumZero++; 3058 else { 3059 NonZeros |= (1 << i); 3060 NumNonZero++; 3061 } 3062 } 3063 3064 if (NumNonZero == 0) { 3065 // All undef vector. Return an UNDEF. All zero vectors were handled above. 3066 return DAG.getNode(ISD::UNDEF, VT); 3067 } 3068 3069 // Special case for a single non-zero, non-undef element. 3070 if (NumNonZero == 1 && NumElems <= 4) { 3071 unsigned Idx = CountTrailingZeros_32(NonZeros); 3072 SDOperand Item = Op.getOperand(Idx); 3073 3074 // If this is an insertion of an i64 value on x86-32, and if the top bits of 3075 // the value are obviously zero, truncate the value to i32 and do the 3076 // insertion that way. Only do this if the value is non-constant or if the 3077 // value is a constant being inserted into element 0. It is cheaper to do 3078 // a constant pool load than it is to do a movd + shuffle. 3079 if (EVT == MVT::i64 && !Subtarget->is64Bit() && 3080 (!IsAllConstants || Idx == 0)) { 3081 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 3082 // Handle MMX and SSE both. 3083 MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; 3084 unsigned VecElts = VT == MVT::v2i64 ? 4 : 2; 3085 3086 // Truncate the value (which may itself be a constant) to i32, and 3087 // convert it to a vector with movd (S2V+shuffle to zero extend). 3088 Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); 3089 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); 3090 Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG); 3091 3092 // Now we have our 32-bit value zero extended in the low element of 3093 // a vector. If Idx != 0, swizzle it into place. 3094 if (Idx != 0) { 3095 SDOperand Ops[] = { 3096 Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), 3097 getSwapEltZeroMask(VecElts, Idx, DAG) 3098 }; 3099 Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); 3100 } 3101 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); 3102 } 3103 } 3104 3105 // If we have a constant or non-constant insertion into the low element of 3106 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 3107 // the rest of the elements. This will be matched as movd/movq/movss/movsd 3108 // depending on what the source datatype is. Because we can only get here 3109 // when NumElems <= 4, this only needs to handle i32/f32/i64/f64. 3110 if (Idx == 0 && 3111 // Don't do this for i64 values on x86-32. 3112 (EVT != MVT::i64 || Subtarget->is64Bit())) { 3113 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3114 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
3115 return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3116 } 3117 3118 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 3119 return SDOperand(); 3120 3121 // Otherwise, if this is a vector with i32 or f32 elements, and the element 3122 // is a non-constant being inserted into an element other than the low one, 3123 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 3124 // movd/movss) to move this into the low element, then shuffle it into 3125 // place. 3126 if (EVTBits == 32) { 3127 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3128 3129 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3130 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG); 3131 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3132 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3133 SmallVector<SDOperand, 8> MaskVec; 3134 for (unsigned i = 0; i < NumElems; i++) 3135 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3136 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3137 &MaskVec[0], MaskVec.size()); 3138 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3139 DAG.getNode(ISD::UNDEF, VT), Mask); 3140 } 3141 } 3142 3143 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3144 if (Values.size() == 1) 3145 return SDOperand(); 3146 3147 // A vector full of immediates; various special cases are already 3148 // handled, so this is best done with a single constant-pool load. 3149 if (IsAllConstants) 3150 return SDOperand(); 3151 3152 // Let legalizer expand 2-wide build_vectors. 3153 if (EVTBits == 64) 3154 return SDOperand(); 3155 3156 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3157 if (EVTBits == 8 && NumElems == 16) { 3158 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3159 *this); 3160 if (V.Val) return V; 3161 } 3162 3163 if (EVTBits == 16 && NumElems == 8) { 3164 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3165 *this); 3166 if (V.Val) return V; 3167 } 3168 3169 // If element VT is == 32 bits, turn it into a number of shuffles. 3170 SmallVector<SDOperand, 8> V; 3171 V.resize(NumElems); 3172 if (NumElems == 4 && NumZero > 0) { 3173 for (unsigned i = 0; i < 4; ++i) { 3174 bool isZero = !(NonZeros & (1 << i)); 3175 if (isZero) 3176 V[i] = getZeroVector(VT, DAG); 3177 else 3178 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3179 } 3180 3181 for (unsigned i = 0; i < 2; ++i) { 3182 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3183 default: break; 3184 case 0: 3185 V[i] = V[i*2]; // Must be a zero vector. 3186 break; 3187 case 1: 3188 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3189 getMOVLMask(NumElems, DAG)); 3190 break; 3191 case 2: 3192 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3193 getMOVLMask(NumElems, DAG)); 3194 break; 3195 case 3: 3196 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3197 getUnpacklMask(NumElems, DAG)); 3198 break; 3199 } 3200 } 3201 3202 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3203 // clears the upper bits. 3204 // FIXME: we can do the same for v4f32 case when we know both parts of 3205 // the lower half come from scalar_to_vector (loadf32). We should do 3206 // that in post legalizer dag combiner with target specific hooks. 
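    // e.g. NonZeros == 0x3: elements 2 and 3 are zero, so the unpckl case above already produced V[0] = <e0, e1, 0, 0> (movd zero-fills the upper lanes of each scalar_to_vector), and V[0] is the whole result for integer element types.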
3207 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3208 return V[0]; 3209 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3210 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3211 SmallVector<SDOperand, 8> MaskVec; 3212 bool Reverse = (NonZeros & 0x3) == 2; 3213 for (unsigned i = 0; i < 2; ++i) 3214 if (Reverse) 3215 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3216 else 3217 MaskVec.push_back(DAG.getConstant(i, EVT)); 3218 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3219 for (unsigned i = 0; i < 2; ++i) 3220 if (Reverse) 3221 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3222 else 3223 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3224 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3225 &MaskVec[0], MaskVec.size()); 3226 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3227 } 3228 3229 if (Values.size() > 2) { 3230 // Expand into a number of unpckl*. 3231 // e.g. for v4f32 3232 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3233 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3234 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3235 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3236 for (unsigned i = 0; i < NumElems; ++i) 3237 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3238 NumElems >>= 1; 3239 while (NumElems != 0) { 3240 for (unsigned i = 0; i < NumElems; ++i) 3241 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3242 UnpckMask); 3243 NumElems >>= 1; 3244 } 3245 return V[0]; 3246 } 3247 3248 return SDOperand(); 3249} 3250 3251static 3252SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3253 SDOperand PermMask, SelectionDAG &DAG, 3254 TargetLowering &TLI) { 3255 SDOperand NewV; 3256 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3257 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3258 MVT::ValueType PtrVT = TLI.getPointerTy(); 3259 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3260 PermMask.Val->op_end()); 3261 3262 // First record which half of which vector the low elements come from. 3263 SmallVector<unsigned, 4> LowQuad(4); 3264 for (unsigned i = 0; i < 4; ++i) { 3265 SDOperand Elt = MaskElts[i]; 3266 if (Elt.getOpcode() == ISD::UNDEF) 3267 continue; 3268 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3269 int QuadIdx = EltIdx / 4; 3270 ++LowQuad[QuadIdx]; 3271 } 3272 int BestLowQuad = -1; 3273 unsigned MaxQuad = 1; 3274 for (unsigned i = 0; i < 4; ++i) { 3275 if (LowQuad[i] > MaxQuad) { 3276 BestLowQuad = i; 3277 MaxQuad = LowQuad[i]; 3278 } 3279 } 3280 3281 // Record which half of which vector the high elements come from. 3282 SmallVector<unsigned, 4> HighQuad(4); 3283 for (unsigned i = 4; i < 8; ++i) { 3284 SDOperand Elt = MaskElts[i]; 3285 if (Elt.getOpcode() == ISD::UNDEF) 3286 continue; 3287 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3288 int QuadIdx = EltIdx / 4; 3289 ++HighQuad[QuadIdx]; 3290 } 3291 int BestHighQuad = -1; 3292 MaxQuad = 1; 3293 for (unsigned i = 0; i < 4; ++i) { 3294 if (HighQuad[i] > MaxQuad) { 3295 BestHighQuad = i; 3296 MaxQuad = HighQuad[i]; 3297 } 3298 } 3299 3300 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3301 if (BestLowQuad != -1 || BestHighQuad != -1) { 3302 // First sort the 4 chunks in order using shufpd. 
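      // Viewing the two inputs as four 64-bit quads (V1 = quads 0,1 and V2 = quads 2,3): e.g. BestLowQuad == 2 and BestHighQuad == 0 build the v2i64 mask <2, 0>, a SHUFPD that puts V2's low quad into the low half of NewV and V1's low quad into the high half.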
3303 SmallVector<SDOperand, 8> MaskVec; 3304 if (BestLowQuad != -1) 3305 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3306 else 3307 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3308 if (BestHighQuad != -1) 3309 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3310 else 3311 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3312 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0], 2); 3313 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3314 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3315 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3316 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3317 3318 // Now sort high and low parts separately. 3319 BitVector InOrder(8); 3320 if (BestLowQuad != -1) { 3321 // Sort lower half in order using PSHUFLW. 3322 MaskVec.clear(); 3323 bool AnyOutOrder = false; 3324 for (unsigned i = 0; i != 4; ++i) { 3325 SDOperand Elt = MaskElts[i]; 3326 if (Elt.getOpcode() == ISD::UNDEF) { 3327 MaskVec.push_back(Elt); 3328 InOrder.set(i); 3329 } else { 3330 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3331 if (EltIdx != i) 3332 AnyOutOrder = true; 3333 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3334 // If this element is in the right place after this shuffle, then 3335 // remember it. 3336 if ((int)(EltIdx / 4) == BestLowQuad) 3337 InOrder.set(i); 3338 } 3339 } 3340 if (AnyOutOrder) { 3341 for (unsigned i = 4; i != 8; ++i) 3342 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3343 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3344 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3345 } 3346 } 3347 3348 if (BestHighQuad != -1) { 3349 // Sort high half in order using PSHUFHW if possible. 3350 MaskVec.clear(); 3351 for (unsigned i = 0; i != 4; ++i) 3352 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3353 bool AnyOutOrder = false; 3354 for (unsigned i = 4; i != 8; ++i) { 3355 SDOperand Elt = MaskElts[i]; 3356 if (Elt.getOpcode() == ISD::UNDEF) { 3357 MaskVec.push_back(Elt); 3358 InOrder.set(i); 3359 } else { 3360 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3361 if (EltIdx != i) 3362 AnyOutOrder = true; 3363 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3364 // If this element is in the right place after this shuffle, then 3365 // remember it. 3366 if ((int)(EltIdx / 4) == BestHighQuad) 3367 InOrder.set(i); 3368 } 3369 } 3370 if (AnyOutOrder) { 3371 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3372 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3373 } 3374 } 3375 3376 // The other elements are put in the right place using pextrw and pinsrw. 3377 for (unsigned i = 0; i != 8; ++i) { 3378 if (InOrder[i]) 3379 continue; 3380 SDOperand Elt = MaskElts[i]; 3381 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3382 if (EltIdx == i) 3383 continue; 3384 SDOperand ExtOp = (EltIdx < 8) 3385 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3386 DAG.getConstant(EltIdx, PtrVT)) 3387 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3388 DAG.getConstant(EltIdx - 8, PtrVT)); 3389 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3390 DAG.getConstant(i, PtrVT)); 3391 } 3392 return NewV; 3393 } 3394 3395 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3396 // as few as possible. 3397 // First, let's find out how many elements are already in the right order.
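  // e.g. for mask <8,9,2,3,4,5,6,7>: elements 2-7 are in place from V1 (V1InOrder == 6) and elements 8,9 are where V2's elements 0,1 would be (V2InOrder == 2), so V1 is kept as the base vector and only the two V2 elements need a pextrw/pinsrw pair each.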
3398 unsigned V1InOrder = 0; 3399 unsigned V1FromV1 = 0; 3400 unsigned V2InOrder = 0; 3401 unsigned V2FromV2 = 0; 3402 SmallVector<SDOperand, 8> V1Elts; 3403 SmallVector<SDOperand, 8> V2Elts; 3404 for (unsigned i = 0; i < 8; ++i) { 3405 SDOperand Elt = MaskElts[i]; 3406 if (Elt.getOpcode() == ISD::UNDEF) { 3407 V1Elts.push_back(Elt); 3408 V2Elts.push_back(Elt); 3409 ++V1InOrder; 3410 ++V2InOrder; 3411 continue; 3412 } 3413 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3414 if (EltIdx == i) { 3415 V1Elts.push_back(Elt); 3416 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3417 ++V1InOrder; 3418 } else if (EltIdx == i+8) { 3419 V1Elts.push_back(Elt); 3420 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3421 ++V2InOrder; 3422 } else if (EltIdx < 8) { 3423 V1Elts.push_back(Elt); 3424 ++V1FromV1; 3425 } else { 3426 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3427 ++V2FromV2; 3428 } 3429 } 3430 3431 if (V2InOrder > V1InOrder) { 3432 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3433 std::swap(V1, V2); 3434 std::swap(V1Elts, V2Elts); 3435 std::swap(V1FromV1, V2FromV2); 3436 } 3437 3438 if ((V1FromV1 + V1InOrder) != 8) { 3439 // Some elements are from V2. 3440 if (V1FromV1) { 3441 // If there are elements that are from V1 but out of place, 3442 // then first sort them in place. 3443 SmallVector<SDOperand, 8> MaskVec; 3444 for (unsigned i = 0; i < 8; ++i) { 3445 SDOperand Elt = V1Elts[i]; 3446 if (Elt.getOpcode() == ISD::UNDEF) { 3447 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3448 continue; 3449 } 3450 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3451 if (EltIdx >= 8) 3452 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3453 else 3454 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3455 } 3456 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3457 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3458 } 3459 3460 NewV = V1; 3461 for (unsigned i = 0; i < 8; ++i) { 3462 SDOperand Elt = V1Elts[i]; 3463 if (Elt.getOpcode() == ISD::UNDEF) 3464 continue; 3465 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3466 if (EltIdx < 8) 3467 continue; 3468 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3469 DAG.getConstant(EltIdx - 8, PtrVT)); 3470 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3471 DAG.getConstant(i, PtrVT)); 3472 } 3473 return NewV; 3474 } else { 3475 // All elements are from V1. 3476 NewV = V1; 3477 for (unsigned i = 0; i < 8; ++i) { 3478 SDOperand Elt = V1Elts[i]; 3479 if (Elt.getOpcode() == ISD::UNDEF) 3480 continue; 3481 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3482 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3483 DAG.getConstant(EltIdx, PtrVT)); 3484 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3485 DAG.getConstant(i, PtrVT)); 3486 } 3487 return NewV; 3488 } 3489} 3490 3491/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3492/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 3493/// done when every pair / quad of shuffle mask elements points to elements in 3494/// the right sequence. e.g. 3495/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3496static 3497SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3498 MVT::ValueType VT, 3499 SDOperand PermMask, SelectionDAG &DAG, 3500 TargetLowering &TLI) { 3501 unsigned NumElems = PermMask.getNumOperands(); 3502 unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3503 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3504 MVT::ValueType NewVT = MaskVT; 3505 switch (VT) { 3506 case MVT::v4f32: NewVT = MVT::v2f64; break; 3507 case MVT::v4i32: NewVT = MVT::v2i64; break; 3508 case MVT::v8i16: NewVT = MVT::v4i32; break; 3509 case MVT::v16i8: NewVT = MVT::v4i32; break; 3510 default: assert(false && "Unexpected!"); 3511 } 3512 3513 if (NewWidth == 2) { 3514 if (MVT::isInteger(VT)) 3515 NewVT = MVT::v2i64; 3516 else 3517 NewVT = MVT::v2f64; 3518 } 3519 unsigned Scale = NumElems / NewWidth; 3520 SmallVector<SDOperand, 8> MaskVec; 3521 for (unsigned i = 0; i < NumElems; i += Scale) { 3522 unsigned StartIdx = ~0U; 3523 for (unsigned j = 0; j < Scale; ++j) { 3524 SDOperand Elt = PermMask.getOperand(i+j); 3525 if (Elt.getOpcode() == ISD::UNDEF) 3526 continue; 3527 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3528 if (StartIdx == ~0U) 3529 StartIdx = EltIdx - (EltIdx % Scale); 3530 if (EltIdx != StartIdx + j) 3531 return SDOperand(); 3532 } 3533 if (StartIdx == ~0U) 3534 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3535 else 3536 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3537 } 3538 3539 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3540 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3541 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3542 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3543 &MaskVec[0], MaskVec.size())); 3544} 3545 3546SDOperand 3547X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3548 SDOperand V1 = Op.getOperand(0); 3549 SDOperand V2 = Op.getOperand(1); 3550 SDOperand PermMask = Op.getOperand(2); 3551 MVT::ValueType VT = Op.getValueType(); 3552 unsigned NumElems = PermMask.getNumOperands(); 3553 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3554 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3555 bool V1IsSplat = false; 3556 bool V2IsSplat = false; 3557 3558 if (isUndefShuffle(Op.Val)) 3559 return DAG.getNode(ISD::UNDEF, VT); 3560 3561 if (isZeroShuffle(Op.Val)) 3562 return getZeroVector(VT, DAG); 3563 3564 if (isIdentityMask(PermMask.Val)) 3565 return V1; 3566 else if (isIdentityMask(PermMask.Val, true)) 3567 return V2; 3568 3569 if (isSplatMask(PermMask.Val)) { 3570 if (NumElems <= 4) return Op; 3571 // Promote it to a v4i32 splat. 3572 return PromoteSplat(Op, DAG); 3573 } 3574 3575 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3576 // do it! 3577 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3578 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3579 if (NewOp.Val) 3580 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3581 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3582 // FIXME: Figure out a cleaner way to do this. 3583 // Try to make use of movq to zero out the top part. 
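    // e.g. a v4i32 shuffle <0,1,4,5> with V2 all-zero becomes the v2i64 shuffle <0,2> after narrowing; that is a commuted MOVL, so it is commuted back into the <2,1> MOVL form below and matched as movq, which keeps the low quadword and zeroes the rest.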
3584 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3585 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3586 if (NewOp.Val) { 3587 SDOperand NewV1 = NewOp.getOperand(0); 3588 SDOperand NewV2 = NewOp.getOperand(1); 3589 SDOperand NewMask = NewOp.getOperand(2); 3590 if (isCommutedMOVL(NewMask.Val, true, false)) { 3591 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3592 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3593 NewV1, NewV2, getMOVLMask(2, DAG)); 3594 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3595 } 3596 } 3597 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3598 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3599 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3600 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3601 } 3602 } 3603 3604 if (X86::isMOVLMask(PermMask.Val)) 3605 return (V1IsUndef) ? V2 : Op; 3606 3607 if (X86::isMOVSHDUPMask(PermMask.Val) || 3608 X86::isMOVSLDUPMask(PermMask.Val) || 3609 X86::isMOVHLPSMask(PermMask.Val) || 3610 X86::isMOVHPMask(PermMask.Val) || 3611 X86::isMOVLPMask(PermMask.Val)) 3612 return Op; 3613 3614 if (ShouldXformToMOVHLPS(PermMask.Val) || 3615 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3616 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3617 3618 bool Commuted = false; 3619 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3620 // 1,1,1,1 -> v8i16 though. 3621 V1IsSplat = isSplatVector(V1.Val); 3622 V2IsSplat = isSplatVector(V2.Val); 3623 3624 // Canonicalize the splat or undef, if present, to be on the RHS. 3625 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3626 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3627 std::swap(V1IsSplat, V2IsSplat); 3628 std::swap(V1IsUndef, V2IsUndef); 3629 Commuted = true; 3630 } 3631 3632 // FIXME: Figure out a cleaner way to do this. 3633 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3634 if (V2IsUndef) return V1; 3635 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3636 if (V2IsSplat) { 3637 // V2 is a splat, so the mask may be malformed. That is, it may point 3638 // to any V2 element. The instruction selector won't like this. Get 3639 // a corrected mask and commute to form a proper MOVS{S|D}. 3640 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3641 if (NewMask.Val != PermMask.Val) 3642 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3643 } 3644 return Op; 3645 } 3646 3647 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3648 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3649 X86::isUNPCKLMask(PermMask.Val) || 3650 X86::isUNPCKHMask(PermMask.Val)) 3651 return Op; 3652 3653 if (V2IsSplat) { 3654 // Normalize mask so all entries that point to V2 point to its first 3655 // element, then try to match unpck{h|l} again. If it matches, return a 3656 // new vector_shuffle with the corrected mask.
3657 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3658 if (NewMask.Val != PermMask.Val) { 3659 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3660 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3661 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3662 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3663 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3664 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3665 } 3666 } 3667 } 3668 3669 // Normalize the node to match x86 shuffle ops if needed 3670 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3671 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3672 3673 if (Commuted) { 3674 // Commute it back and try unpck* again. 3675 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3676 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3677 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3678 X86::isUNPCKLMask(PermMask.Val) || 3679 X86::isUNPCKHMask(PermMask.Val)) 3680 return Op; 3681 } 3682 3683 // If VT is integer, try PSHUF* first, then SHUFP*. 3684 if (MVT::isInteger(VT)) { 3685 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3686 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3687 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3688 X86::isPSHUFDMask(PermMask.Val)) || 3689 X86::isPSHUFHWMask(PermMask.Val) || 3690 X86::isPSHUFLWMask(PermMask.Val)) { 3691 if (V2.getOpcode() != ISD::UNDEF) 3692 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3693 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3694 return Op; 3695 } 3696 3697 if (X86::isSHUFPMask(PermMask.Val) && 3698 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3699 return Op; 3700 } else { 3701 // Floating point cases in the other order. 3702 if (X86::isSHUFPMask(PermMask.Val)) 3703 return Op; 3704 if (X86::isPSHUFDMask(PermMask.Val) || 3705 X86::isPSHUFHWMask(PermMask.Val) || 3706 X86::isPSHUFLWMask(PermMask.Val)) { 3707 if (V2.getOpcode() != ISD::UNDEF) 3708 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3709 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3710 return Op; 3711 } 3712 } 3713 3714 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 3715 if (VT == MVT::v8i16) { 3716 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3717 if (NewOp.Val) 3718 return NewOp; 3719 } 3720 3721 // Handle all 4 wide cases with a number of shuffles. 3722 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3723 // Don't do this for MMX. 3724 MVT::ValueType MaskVT = PermMask.getValueType(); 3725 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3726 SmallVector<std::pair<int, int>, 8> Locs; 3727 Locs.reserve(NumElems); 3728 SmallVector<SDOperand, 8> Mask1(NumElems, 3729 DAG.getNode(ISD::UNDEF, MaskEVT)); 3730 SmallVector<SDOperand, 8> Mask2(NumElems, 3731 DAG.getNode(ISD::UNDEF, MaskEVT)); 3732 unsigned NumHi = 0; 3733 unsigned NumLo = 0; 3734 // If no more than two elements come from either vector, this can be 3735 // implemented with two shuffles. The first shuffle gathers the elements. 3736 // The second shuffle, which takes the first shuffle as both of its 3737 // vector operands, puts the elements into the right order.
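    // e.g. for mask <0,4,1,5>: the first shuffle gathers with mask <0,1,4,5>, giving T = <V1[0],V1[1],V2[0],V2[1]>; the second shuffle of (T,T) then uses mask <0,2,5,7> to produce <V1[0],V2[0],V1[1],V2[1]>.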
3738 for (unsigned i = 0; i != NumElems; ++i) { 3739 SDOperand Elt = PermMask.getOperand(i); 3740 if (Elt.getOpcode() == ISD::UNDEF) { 3741 Locs[i] = std::make_pair(-1, -1); 3742 } else { 3743 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3744 if (Val < NumElems) { 3745 Locs[i] = std::make_pair(0, NumLo); 3746 Mask1[NumLo] = Elt; 3747 NumLo++; 3748 } else { 3749 Locs[i] = std::make_pair(1, NumHi); 3750 if (2+NumHi < NumElems) 3751 Mask1[2+NumHi] = Elt; 3752 NumHi++; 3753 } 3754 } 3755 } 3756 if (NumLo <= 2 && NumHi <= 2) { 3757 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3758 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3759 &Mask1[0], Mask1.size())); 3760 for (unsigned i = 0; i != NumElems; ++i) { 3761 if (Locs[i].first == -1) 3762 continue; 3763 else { 3764 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3765 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3766 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3767 } 3768 } 3769 3770 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3771 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3772 &Mask2[0], Mask2.size())); 3773 } 3774 3775 // Break it into (shuffle shuffle_hi, shuffle_lo). 3776 Locs.clear(); 3777 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3778 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3779 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3780 unsigned MaskIdx = 0; 3781 unsigned LoIdx = 0; 3782 unsigned HiIdx = NumElems/2; 3783 for (unsigned i = 0; i != NumElems; ++i) { 3784 if (i == NumElems/2) { 3785 MaskPtr = &HiMask; 3786 MaskIdx = 1; 3787 LoIdx = 0; 3788 HiIdx = NumElems/2; 3789 } 3790 SDOperand Elt = PermMask.getOperand(i); 3791 if (Elt.getOpcode() == ISD::UNDEF) { 3792 Locs[i] = std::make_pair(-1, -1); 3793 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3794 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3795 (*MaskPtr)[LoIdx] = Elt; 3796 LoIdx++; 3797 } else { 3798 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3799 (*MaskPtr)[HiIdx] = Elt; 3800 HiIdx++; 3801 } 3802 } 3803 3804 SDOperand LoShuffle = 3805 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3806 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3807 &LoMask[0], LoMask.size())); 3808 SDOperand HiShuffle = 3809 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3810 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3811 &HiMask[0], HiMask.size())); 3812 SmallVector<SDOperand, 8> MaskOps; 3813 for (unsigned i = 0; i != NumElems; ++i) { 3814 if (Locs[i].first == -1) { 3815 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3816 } else { 3817 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3818 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3819 } 3820 } 3821 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3822 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3823 &MaskOps[0], MaskOps.size())); 3824 } 3825 3826 return SDOperand(); 3827} 3828 3829SDOperand 3830X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, 3831 SelectionDAG &DAG) { 3832 MVT::ValueType VT = Op.getValueType(); 3833 if (MVT::getSizeInBits(VT) == 8) { 3834 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, 3835 Op.getOperand(0), Op.getOperand(1)); 3836 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 3837 DAG.getValueType(VT)); 3838 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3839 } else if (MVT::getSizeInBits(VT) == 16) { 3840 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, 3841 Op.getOperand(0), Op.getOperand(1)); 3842 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, 
3843 DAG.getValueType(VT)); 3844 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3845 } 3846 return SDOperand(); 3847} 3848 3849 3850SDOperand 3851X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3852 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3853 return SDOperand(); 3854 3855 if (Subtarget->hasSSE41()) 3856 return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 3857 3858 MVT::ValueType VT = Op.getValueType(); 3859 // TODO: handle v16i8. 3860 if (MVT::getSizeInBits(VT) == 16) { 3861 SDOperand Vec = Op.getOperand(0); 3862 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3863 if (Idx == 0) 3864 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3865 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3866 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3867 Op.getOperand(1))); 3868 // Transform it so it matches pextrw, which produces a 32-bit result. 3869 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3870 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3871 Op.getOperand(0), Op.getOperand(1)); 3872 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3873 DAG.getValueType(VT)); 3874 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3875 } else if (MVT::getSizeInBits(VT) == 32) { 3876 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3877 if (Idx == 0) 3878 return Op; 3879 // SHUFPS the element to the lowest double word, then movss. 3880 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3881 SmallVector<SDOperand, 8> IdxVec; 3882 IdxVec. 3883 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3884 IdxVec. 3885 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3886 IdxVec. 3887 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3888 IdxVec. 3889 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3890 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3891 &IdxVec[0], IdxVec.size()); 3892 SDOperand Vec = Op.getOperand(0); 3893 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3894 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3895 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3896 DAG.getIntPtrConstant(0)); 3897 } else if (MVT::getSizeInBits(VT) == 64) { 3898 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 3899 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 3900 // to match extract_elt for f64. 3901 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3902 if (Idx == 0) 3903 return Op; 3904 3905 // UNPCKHPD the element to the lowest double word, then movsd. 3906 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3907 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3908 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); 3909 SmallVector<SDOperand, 8> IdxVec; 3910 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3911 IdxVec.
3912 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3913 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3914 &IdxVec[0], IdxVec.size()); 3915 SDOperand Vec = Op.getOperand(0); 3916 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3917 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3918 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3919 DAG.getIntPtrConstant(0)); 3920 } 3921 3922 return SDOperand(); 3923} 3924 3925SDOperand 3926X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG) { 3927 MVT::ValueType VT = Op.getValueType(); 3928 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3929 3930 SDOperand N0 = Op.getOperand(0); 3931 SDOperand N1 = Op.getOperand(1); 3932 SDOperand N2 = Op.getOperand(2); 3933 3934 if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) { 3935 unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB 3936 : X86ISD::PINSRW; 3937 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second 3938 // argument. 3939 if (N1.getValueType() != MVT::i32) 3940 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3941 if (N2.getValueType() != MVT::i32) 3942 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3943 return DAG.getNode(Opc, VT, N0, N1, N2); 3944 } else if (EVT == MVT::f32) { 3945 // Bits [7:6] of the constant are the source select. This will always be 3946 // zero here. The DAG Combiner may combine an extract_elt index into these 3947 // bits. For example (insert (extract, 3), 2) could be matched by putting 3948 // the '3' into bits [7:6] of X86ISD::INSERTPS. 3949 // Bits [5:4] of the constant are the destination select. This is the 3950 // value of the incoming immediate. 3951 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 3952 // combine either bitwise AND or insert of float 0.0 to set these bits. 3953 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4); 3954 return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2); 3955 } 3956 return SDOperand(); 3957} 3958 3959SDOperand 3960X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3961 MVT::ValueType VT = Op.getValueType(); 3962 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3963 3964 if (Subtarget->hasSSE41()) 3965 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 3966 3967 if (EVT == MVT::i8) 3968 return SDOperand(); 3969 3970 SDOperand N0 = Op.getOperand(0); 3971 SDOperand N1 = Op.getOperand(1); 3972 SDOperand N2 = Op.getOperand(2); 3973 3974 if (MVT::getSizeInBits(EVT) == 16) { 3975 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32 3976 // as its second argument.
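    // e.g. (insert_vector_elt v8i16 V, (i16 X), 3) becomes (X86ISD::PINSRW V, (any_extend X), 3), which should then match "pinsrw $3, %eax, %xmm0" during instruction selection.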
3977 if (N1.getValueType() != MVT::i32) 3978 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3979 if (N2.getValueType() != MVT::i32) 3980 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3981 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3982 } 3983 return SDOperand(); 3984} 3985 3986SDOperand 3987X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3988 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3989 MVT::ValueType VT = MVT::v2i32; 3990 switch (Op.getValueType()) { 3991 default: break; 3992 case MVT::v16i8: 3993 case MVT::v8i16: 3994 VT = MVT::v4i32; 3995 break; 3996 } 3997 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), 3998 DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt)); 3999} 4000 4001// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 4002// their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is 4003// one of the above-mentioned nodes. It has to be wrapped because otherwise 4004// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 4005// be used to form addressing modes. These wrapped nodes will be selected 4006// into MOV32ri. 4007SDOperand 4008X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 4009 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 4010 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 4011 getPointerTy(), 4012 CP->getAlignment()); 4013 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4014 // With PIC, the address is actually $g + Offset. 4015 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4016 !Subtarget->isPICStyleRIPRel()) { 4017 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4018 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4019 Result); 4020 } 4021 4022 return Result; 4023} 4024 4025SDOperand 4026X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 4027 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 4028 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 4029 // If it's a debug information descriptor, don't mess with it. 4030 if (DAG.isVerifiedDebugInfoDesc(Op)) 4031 return Result; 4032 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4033 // With PIC, the address is actually $g + Offset. 4034 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4035 !Subtarget->isPICStyleRIPRel()) { 4036 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4037 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4038 Result); 4039 } 4040 4041 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 4042 // load the value at address GV, not the value of GV itself. This means that 4043 // the GlobalAddress must be in the base or index register of the address, not 4044 // the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 4045 // The same applies for external symbols during PIC codegen 4046 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 4047 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 4048 PseudoSourceValue::getGOT(), 0); 4049 4050 return Result; 4051} 4052 4053// Lower ISD::GlobalTLSAddress using the "general dynamic" model 4054static SDOperand 4055LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4056 const MVT::ValueType PtrVT) { 4057 SDOperand InFlag; 4058 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 4059 DAG.getNode(X86ISD::GlobalBaseReg, 4060 PtrVT), InFlag); 4061 InFlag = Chain.getValue(1); 4062 4063 // emit leal symbol@TLSGD(,%ebx,1), %eax 4064 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4065 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4066 GA->getValueType(0), 4067 GA->getOffset()); 4068 SDOperand Ops[] = { Chain, TGA, InFlag }; 4069 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 4070 InFlag = Result.getValue(2); 4071 Chain = Result.getValue(1); 4072 4073 // call ___tls_get_addr. This function receives its argument in 4074 // the register EAX. 4075 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 4076 InFlag = Chain.getValue(1); 4077 4078 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4079 SDOperand Ops1[] = { Chain, 4080 DAG.getTargetExternalSymbol("___tls_get_addr", 4081 PtrVT), 4082 DAG.getRegister(X86::EAX, PtrVT), 4083 DAG.getRegister(X86::EBX, PtrVT), 4084 InFlag }; 4085 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 4086 InFlag = Chain.getValue(1); 4087 4088 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 4089} 4090 4091// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 4092// "local exec" model. 4093static SDOperand 4094LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4095 const MVT::ValueType PtrVT) { 4096 // Get the Thread Pointer 4097 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4098 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4099 // exec) 4100 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4101 GA->getValueType(0), 4102 GA->getOffset()); 4103 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4104 4105 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4106 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 4107 PseudoSourceValue::getGOT(), 0); 4108 4109 // The address of the thread local variable is the add of the thread 4110 // pointer with the offset of the variable. 
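  // e.g. local exec folds the offset in as the x@ntpoff immediate, while initial exec (the isDeclaration case above) must first load the actual offset from the GOT entry addressed by x@indntpoff.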
4111 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4112} 4113 4114SDOperand 4115X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4116 // TODO: implement the "local dynamic" model 4117 // TODO: implement the "initial exec" model for PIC executables 4118 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 4119 "TLS not implemented for non-ELF and 64-bit targets"); 4120 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4121 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 4122 // otherwise use the "Local Exec" TLS Model. 4123 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4124 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 4125 else 4126 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4127} 4128 4129SDOperand 4130X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4131 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4132 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4133 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4134 // With PIC, the address is actually $g + Offset. 4135 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4136 !Subtarget->isPICStyleRIPRel()) { 4137 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4138 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4139 Result); 4140 } 4141 4142 return Result; 4143} 4144 4145SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4146 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4147 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4148 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4149 // With PIC, the address is actually $g + Offset. 4150 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4151 !Subtarget->isPICStyleRIPRel()) { 4152 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4153 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4154 Result); 4155 } 4156 4157 return Result; 4158} 4159 4160/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4161/// take a 2 x i32 value to shift plus a shift amount. 4162SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4163 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4164 MVT::ValueType VT = Op.getValueType(); 4165 unsigned VTBits = MVT::getSizeInBits(VT); 4166 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4167 SDOperand ShOpLo = Op.getOperand(0); 4168 SDOperand ShOpHi = Op.getOperand(1); 4169 SDOperand ShAmt = Op.getOperand(2); 4170 SDOperand Tmp1 = isSRA ? 4171 DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : 4172 DAG.getConstant(0, VT); 4173 4174 SDOperand Tmp2, Tmp3; 4175 if (Op.getOpcode() == ISD::SHL_PARTS) { 4176 Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); 4177 Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); 4178 } else { 4179 Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt); 4180 Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); 4181 } 4182 4183 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4184 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4185 DAG.getConstant(VTBits, MVT::i8)); 4186 SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, 4187 AndNode, DAG.getConstant(0, MVT::i8)); 4188 4189 SDOperand Hi, Lo; 4190 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4191 VTs = DAG.getNodeValueTypes(VT, MVT::Flag); 4192 SmallVector<SDOperand, 4> Ops; 4193 if (Op.getOpcode() == ISD::SHL_PARTS) { 4194 Ops.push_back(Tmp2); 4195 Ops.push_back(Tmp3); 4196 Ops.push_back(CC); 4197 Ops.push_back(Cond); 4198 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4199 4200 Ops.clear(); 4201 Ops.push_back(Tmp3); 4202 Ops.push_back(Tmp1); 4203 Ops.push_back(CC); 4204 Ops.push_back(Cond); 4205 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4206 } else { 4207 Ops.push_back(Tmp2); 4208 Ops.push_back(Tmp3); 4209 Ops.push_back(CC); 4210 Ops.push_back(Cond); 4211 Lo = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4212 4213 Ops.clear(); 4214 Ops.push_back(Tmp3); 4215 Ops.push_back(Tmp1); 4216 Ops.push_back(CC); 4217 Ops.push_back(Cond); 4218 Hi = DAG.getNode(X86ISD::CMOV, VT, &Ops[0], Ops.size()); 4219 } 4220 4221 VTs = DAG.getNodeValueTypes(VT, VT); 4222 Ops.clear(); 4223 Ops.push_back(Lo); 4224 Ops.push_back(Hi); 4225 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4226} 4227 4228SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4229 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4230 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && 4231 "Unknown SINT_TO_FP to lower!"); 4232 4233 // These are really Legal; caller falls through into that case. 4234 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4235 return SDOperand(); 4236 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4237 Subtarget->is64Bit()) 4238 return SDOperand(); 4239 4240 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4241 MachineFunction &MF = DAG.getMachineFunction(); 4242 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4243 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4244 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4245 StackSlot, 4246 PseudoSourceValue::getFixedStack(), 4247 SSFI); 4248 4249 // Build the FILD 4250 SDVTList Tys; 4251 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4252 if (useSSE) 4253 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4254 else 4255 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4256 SmallVector<SDOperand, 8> Ops; 4257 Ops.push_back(Chain); 4258 Ops.push_back(StackSlot); 4259 Ops.push_back(DAG.getValueType(SrcVT)); 4260 SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, 4261 Tys, &Ops[0], Ops.size()); 4262 4263 if (useSSE) { 4264 Chain = Result.getValue(1); 4265 SDOperand InFlag = Result.getValue(2); 4266 4267 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4268 // shouldn't be necessary except that RFP cannot be live across 4269 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
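    // e.g. for i64->f64 on x86-32 with SSE2, the sequence below is: fildll from the first slot, fstpl of the x87 result into a fresh 8-byte slot, then a movsd reload into an XMM register.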
4270 MachineFunction &MF = DAG.getMachineFunction(); 4271 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4272 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4273 Tys = DAG.getVTList(MVT::Other); 4274 SmallVector<SDOperand, 8> Ops; 4275 Ops.push_back(Chain); 4276 Ops.push_back(Result); 4277 Ops.push_back(StackSlot); 4278 Ops.push_back(DAG.getValueType(Op.getValueType())); 4279 Ops.push_back(InFlag); 4280 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4281 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4282 PseudoSourceValue::getFixedStack(), SSFI); 4283 } 4284 4285 return Result; 4286} 4287 4288std::pair<SDOperand,SDOperand> X86TargetLowering:: 4289FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4290 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4291 "Unknown FP_TO_SINT to lower!"); 4292 4293 // These are really Legal. 4294 if (Op.getValueType() == MVT::i32 && 4295 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4296 return std::make_pair(SDOperand(), SDOperand()); 4297 if (Subtarget->is64Bit() && 4298 Op.getValueType() == MVT::i64 && 4299 Op.getOperand(0).getValueType() != MVT::f80) 4300 return std::make_pair(SDOperand(), SDOperand()); 4301 4302 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4303 // stack slot. 4304 MachineFunction &MF = DAG.getMachineFunction(); 4305 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4306 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4307 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4308 unsigned Opc; 4309 switch (Op.getValueType()) { 4310 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4311 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4312 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4313 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4314 } 4315 4316 SDOperand Chain = DAG.getEntryNode(); 4317 SDOperand Value = Op.getOperand(0); 4318 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4319 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4320 Chain = DAG.getStore(Chain, Value, StackSlot, 4321 PseudoSourceValue::getFixedStack(), SSFI); 4322 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4323 SDOperand Ops[] = { 4324 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4325 }; 4326 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4327 Chain = Value.getValue(1); 4328 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4329 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4330 } 4331 4332 // Build the FP_TO_INT*_IN_MEM 4333 SDOperand Ops[] = { Chain, Value, StackSlot }; 4334 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4335 4336 return std::make_pair(FIST, StackSlot); 4337} 4338 4339SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4340 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4341 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4342 if (FIST.Val == 0) return SDOperand(); 4343 4344 // Load the result. 4345 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4346} 4347 4348SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4349 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4350 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4351 if (FIST.Val == 0) return 0; 4352 4353 // Return an i64 load from the stack slot. 
4354 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4355 4356 // Use a MERGE_VALUES node to drop the chain result value. 4357 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4358} 4359 4360SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4361 MVT::ValueType VT = Op.getValueType(); 4362 MVT::ValueType EltVT = VT; 4363 if (MVT::isVector(VT)) 4364 EltVT = MVT::getVectorElementType(VT); 4365 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4366 std::vector<Constant*> CV; 4367 if (EltVT == MVT::f64) { 4368 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4369 CV.push_back(C); 4370 CV.push_back(C); 4371 } else { 4372 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4373 CV.push_back(C); 4374 CV.push_back(C); 4375 CV.push_back(C); 4376 CV.push_back(C); 4377 } 4378 Constant *C = ConstantVector::get(CV); 4379 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4380 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4381 PseudoSourceValue::getConstantPool(), 0, 4382 false, 16); 4383 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4384} 4385 4386SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4387 MVT::ValueType VT = Op.getValueType(); 4388 MVT::ValueType EltVT = VT; 4389 unsigned EltNum = 1; 4390 if (MVT::isVector(VT)) { 4391 EltVT = MVT::getVectorElementType(VT); 4392 EltNum = MVT::getVectorNumElements(VT); 4393 } 4394 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4395 std::vector<Constant*> CV; 4396 if (EltVT == MVT::f64) { 4397 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4398 CV.push_back(C); 4399 CV.push_back(C); 4400 } else { 4401 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4402 CV.push_back(C); 4403 CV.push_back(C); 4404 CV.push_back(C); 4405 CV.push_back(C); 4406 } 4407 Constant *C = ConstantVector::get(CV); 4408 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4409 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4410 PseudoSourceValue::getConstantPool(), 0, 4411 false, 16); 4412 if (MVT::isVector(VT)) { 4413 return DAG.getNode(ISD::BIT_CONVERT, VT, 4414 DAG.getNode(ISD::XOR, MVT::v2i64, 4415 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4416 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4417 } else { 4418 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4419 } 4420} 4421 4422SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4423 SDOperand Op0 = Op.getOperand(0); 4424 SDOperand Op1 = Op.getOperand(1); 4425 MVT::ValueType VT = Op.getValueType(); 4426 MVT::ValueType SrcVT = Op1.getValueType(); 4427 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4428 4429 // If second operand is smaller, extend it first. 4430 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4431 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4432 SrcVT = VT; 4433 SrcTy = MVT::getTypeForValueType(SrcVT); 4434 } 4435 // And if it is bigger, shrink it first. 4436 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4437 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4438 SrcVT = VT; 4439 SrcTy = MVT::getTypeForValueType(SrcVT); 4440 } 4441 4442 // At this point the operands and the result should have the same 4443 // type, and that won't be f80 since that is not custom lowered. 4444 4445 // First get the sign bit of second operand. 
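  // For f64 the constant below is <0x8000000000000000, 0>; for f32 it is <0x80000000, 0, 0, 0>. FAND with Op1 then leaves only Op1's sign bit.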
4446 std::vector<Constant*> CV; 4447 if (SrcVT == MVT::f64) { 4448 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4449 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4450 } else { 4451 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4452 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4453 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4454 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4455 } 4456 Constant *C = ConstantVector::get(CV); 4457 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4458 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4459 PseudoSourceValue::getConstantPool(), 0, 4460 false, 16); 4461 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4462 4463 // Shift sign bit right or left if the two operands have different types. 4464 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4465 // Op0 is MVT::f32, Op1 is MVT::f64. 4466 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4467 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4468 DAG.getConstant(32, MVT::i32)); 4469 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4470 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4471 DAG.getIntPtrConstant(0)); 4472 } 4473 4474 // Clear first operand sign bit. 4475 CV.clear(); 4476 if (VT == MVT::f64) { 4477 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4478 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4479 } else { 4480 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4481 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4482 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4483 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4484 } 4485 C = ConstantVector::get(CV); 4486 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4487 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4488 PseudoSourceValue::getConstantPool(), 0, 4489 false, 16); 4490 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4491 4492 // Or the value with the sign bit. 
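  // Net effect is the usual bit-level identity (illustrative scalar form):
  //   copysign(x, y) == (x & ~SignMask) | (y & SignMask)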
4493 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4494} 4495 4496SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4497 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4498 SDOperand Cond; 4499 SDOperand Op0 = Op.getOperand(0); 4500 SDOperand Op1 = Op.getOperand(1); 4501 SDOperand CC = Op.getOperand(2); 4502 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4503 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4504 unsigned X86CC; 4505 4506 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4507 Op0, Op1, DAG)) { 4508 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4509 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4510 DAG.getConstant(X86CC, MVT::i8), Cond); 4511 } 4512 4513 assert(isFP && "Illegal integer SetCC!"); 4514 4515 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4516 switch (SetCCOpcode) { 4517 default: assert(false && "Illegal floating point SetCC!"); 4518 case ISD::SETOEQ: { // !PF & ZF 4519 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4520 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4521 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4522 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4523 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4524 } 4525 case ISD::SETUNE: { // PF | !ZF 4526 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4527 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4528 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4529 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4530 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4531 } 4532 } 4533} 4534 4535 4536SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4537 bool addTest = true; 4538 SDOperand Cond = Op.getOperand(0); 4539 SDOperand CC; 4540 4541 if (Cond.getOpcode() == ISD::SETCC) 4542 Cond = LowerSETCC(Cond, DAG); 4543 4544 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4545 // setting operand in place of the X86ISD::SETCC. 4546 if (Cond.getOpcode() == X86ISD::SETCC) { 4547 CC = Cond.getOperand(0); 4548 4549 SDOperand Cmp = Cond.getOperand(1); 4550 unsigned Opc = Cmp.getOpcode(); 4551 MVT::ValueType VT = Op.getValueType(); 4552 4553 bool IllegalFPCMov = false; 4554 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4555 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4556 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4557 4558 if ((Opc == X86ISD::CMP || 4559 Opc == X86ISD::COMI || 4560 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4561 Cond = Cmp; 4562 addTest = false; 4563 } 4564 } 4565 4566 if (addTest) { 4567 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4568 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4569 } 4570 4571 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4572 MVT::Flag); 4573 SmallVector<SDOperand, 4> Ops; 4574 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4575 // condition is true. 
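  // Accordingly, the SELECT's false value (operand 2) is pushed first and its
  // true value (operand 1) second, followed by the condition code and flag.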
4576   Ops.push_back(Op.getOperand(2));
4577   Ops.push_back(Op.getOperand(1));
4578   Ops.push_back(CC);
4579   Ops.push_back(Cond);
4580   return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
4581 }
4582 
4583 SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
4584   bool addTest = true;
4585   SDOperand Chain = Op.getOperand(0);
4586   SDOperand Cond  = Op.getOperand(1);
4587   SDOperand Dest  = Op.getOperand(2);
4588   SDOperand CC;
4589 
4590   if (Cond.getOpcode() == ISD::SETCC)
4591     Cond = LowerSETCC(Cond, DAG);
4592 
4593   // If condition flag is set by an X86ISD::CMP, then use it as the condition
4594   // setting operand in place of the X86ISD::SETCC.
4595   if (Cond.getOpcode() == X86ISD::SETCC) {
4596     CC = Cond.getOperand(0);
4597 
4598     SDOperand Cmp = Cond.getOperand(1);
4599     unsigned Opc = Cmp.getOpcode();
4600     if (Opc == X86ISD::CMP ||
4601         Opc == X86ISD::COMI ||
4602         Opc == X86ISD::UCOMI) {
4603       Cond = Cmp;
4604       addTest = false;
4605     }
4606   }
4607 
4608   if (addTest) {
4609     CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4610     Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
4611   }
4612   return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
4613                      Chain, Dest, CC, Cond);
4614 }
4615 
4616 
4617 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
4618 // Calls to _alloca are needed to probe the stack when allocating more than 4k
4619 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
4620 // that the guard pages used by the OS virtual memory manager are allocated in
4621 // correct sequence.
4622 SDOperand
4623 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
4624                                            SelectionDAG &DAG) {
4625   assert(Subtarget->isTargetCygMing() &&
4626          "This should be used only on Cygwin/Mingw targets");
4627 
4628   // Get the inputs.
4629   SDOperand Chain = Op.getOperand(0);
4630   SDOperand Size  = Op.getOperand(1);
4631   // FIXME: Ensure alignment here
4632 
4633   SDOperand Flag;
4634 
4635   MVT::ValueType IntPtr = getPointerTy();
4636   MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
4637 
4638   Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
4639   Flag = Chain.getValue(1);
4640 
4641   SDVTList  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
4642   SDOperand Ops[] = { Chain,
4643                       DAG.getTargetExternalSymbol("_alloca", IntPtr),
4644                       DAG.getRegister(X86::EAX, IntPtr),
4645                       Flag };
4646   Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
4647   Flag = Chain.getValue(1);
4648 
4649   Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
4650 
4651   std::vector<MVT::ValueType> Tys;
4652   Tys.push_back(SPTy);
4653   Tys.push_back(MVT::Other);
4654   SDOperand Ops1[2] = { Chain.getValue(0), Chain };
4655   return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
4656 }
4657 
4658 SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
4659   SDOperand InFlag(0, 0);
4660   SDOperand Chain = Op.getOperand(0);
4661   unsigned Align =
4662     (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4663   if (Align == 0) Align = 1;
4664 
4665   ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4666   // If not DWORD aligned or size is more than the threshold, call memset.
4667   // The libc version is likely to be faster for these cases. It can use the
4668   // address value and run time information about the CPU.
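  // Otherwise the set is emitted inline below: rep;stos with the widest unit
  // the alignment permits, plus scalar stores for any leftover tail bytes.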
4669   if ((Align & 3) != 0 ||
4670       (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
4671     MVT::ValueType IntPtr = getPointerTy();
4672     const Type *IntPtrTy = getTargetData()->getIntPtrType();
4673     TargetLowering::ArgListTy Args;
4674     TargetLowering::ArgListEntry Entry;
4675     Entry.Node = Op.getOperand(1);
4676     Entry.Ty = IntPtrTy;
4677     Args.push_back(Entry);
4678     // Extend the unsigned i8 argument to be an int value for the call.
4679     Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
4680     Entry.Ty = IntPtrTy;
4681     Args.push_back(Entry);
4682     Entry.Node = Op.getOperand(3);
4683     Args.push_back(Entry);
4684     std::pair<SDOperand,SDOperand> CallResult =
4685       LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
4686                   false, DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
4687     return CallResult.second;
4688   }
4689 
4690   MVT::ValueType AVT;
4691   SDOperand Count;
4692   ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4693   unsigned BytesLeft = 0;
4694   bool TwoRepStos = false;
4695   if (ValC) {
4696     unsigned ValReg;
4697     uint64_t Val = ValC->getValue() & 255;
4698 
4699     // If the value is a constant, then we can potentially use larger sets.
4700     switch (Align & 3) {
4701     case 2:   // WORD aligned
4702       AVT = MVT::i16;
4703       ValReg = X86::AX;
4704       Val = (Val << 8) | Val;
4705       break;
4706     case 0:  // DWORD aligned
4707       AVT = MVT::i32;
4708       ValReg = X86::EAX;
4709       Val = (Val << 8)  | Val;
4710       Val = (Val << 16) | Val;
4711       if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
4712         AVT = MVT::i64;
4713         ValReg = X86::RAX;
4714         Val = (Val << 32) | Val;
4715       }
4716       break;
4717     default:  // Byte aligned
4718       AVT = MVT::i8;
4719       ValReg = X86::AL;
4720       Count = Op.getOperand(3);
4721       break;
4722     }
4723 
4724     if (AVT > MVT::i8) {
4725       if (I) {
4726         unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4727         Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
4728         BytesLeft = I->getValue() % UBytes;
4729       } else {
4730         assert(AVT >= MVT::i32 &&
4731                "Do not use rep;stos if not at least DWORD aligned");
        // Divide the byte count by the stos unit size: 8 for stosq, 4 for stosd.
4732         Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4733                             Op.getOperand(3),
                            DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8));
4734         TwoRepStos = true;
4735       }
4736     }
4737 
4738     Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
4739                               InFlag);
4740     InFlag = Chain.getValue(1);
4741   } else {
4742     AVT = MVT::i8;
4743     Count  = Op.getOperand(3);
4744     Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
4745     InFlag = Chain.getValue(1);
4746   }
4747 
4748   Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4749                             Count, InFlag);
4750   InFlag = Chain.getValue(1);
4751   Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4752                             Op.getOperand(1), InFlag);
4753   InFlag = Chain.getValue(1);
4754 
4755   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4756   SmallVector<SDOperand, 8> Ops;
4757   Ops.push_back(Chain);
4758   Ops.push_back(DAG.getValueType(AVT));
4759   Ops.push_back(InFlag);
4760   Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4761 
4762   if (TwoRepStos) {
4763     InFlag = Chain.getValue(1);
4764     Count = Op.getOperand(3);
4765     MVT::ValueType CVT = Count.getValueType();
4766     SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4767                                  DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4768     Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4769 Left, InFlag); 4770 InFlag = Chain.getValue(1); 4771 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4772 Ops.clear(); 4773 Ops.push_back(Chain); 4774 Ops.push_back(DAG.getValueType(MVT::i8)); 4775 Ops.push_back(InFlag); 4776 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4777 } else if (BytesLeft) { 4778 // Issue stores for the last 1 - 7 bytes. 4779 SDOperand Value; 4780 unsigned Val = ValC->getValue() & 255; 4781 unsigned Offset = I->getValue() - BytesLeft; 4782 SDOperand DstAddr = Op.getOperand(1); 4783 MVT::ValueType AddrVT = DstAddr.getValueType(); 4784 if (BytesLeft >= 4) { 4785 Val = (Val << 8) | Val; 4786 Val = (Val << 16) | Val; 4787 Value = DAG.getConstant(Val, MVT::i32); 4788 Chain = DAG.getStore(Chain, Value, 4789 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4790 DAG.getConstant(Offset, AddrVT)), 4791 NULL, 0); 4792 BytesLeft -= 4; 4793 Offset += 4; 4794 } 4795 if (BytesLeft >= 2) { 4796 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4797 Chain = DAG.getStore(Chain, Value, 4798 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4799 DAG.getConstant(Offset, AddrVT)), 4800 NULL, 0); 4801 BytesLeft -= 2; 4802 Offset += 2; 4803 } 4804 if (BytesLeft == 1) { 4805 Value = DAG.getConstant(Val, MVT::i8); 4806 Chain = DAG.getStore(Chain, Value, 4807 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4808 DAG.getConstant(Offset, AddrVT)), 4809 NULL, 0); 4810 } 4811 } 4812 4813 return Chain; 4814} 4815 4816SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4817 SDOperand Dest, 4818 SDOperand Source, 4819 unsigned Size, 4820 unsigned Align, 4821 SelectionDAG &DAG) { 4822 MVT::ValueType AVT; 4823 unsigned BytesLeft = 0; 4824 switch (Align & 3) { 4825 case 2: // WORD aligned 4826 AVT = MVT::i16; 4827 break; 4828 case 0: // DWORD aligned 4829 AVT = MVT::i32; 4830 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4831 AVT = MVT::i64; 4832 break; 4833 default: // Byte aligned 4834 AVT = MVT::i8; 4835 break; 4836 } 4837 4838 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4839 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4840 BytesLeft = Size % UBytes; 4841 4842 SDOperand InFlag(0, 0); 4843 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4844 Count, InFlag); 4845 InFlag = Chain.getValue(1); 4846 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4847 Dest, InFlag); 4848 InFlag = Chain.getValue(1); 4849 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4850 Source, InFlag); 4851 InFlag = Chain.getValue(1); 4852 4853 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4854 SmallVector<SDOperand, 8> Ops; 4855 Ops.push_back(Chain); 4856 Ops.push_back(DAG.getValueType(AVT)); 4857 Ops.push_back(InFlag); 4858 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4859 4860 if (BytesLeft) { 4861 // Issue loads and stores for the last 1 - 7 bytes. 
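    // Illustrative example: a 13-byte DWORD-aligned copy issues rep;movsd for
    // the first 12 bytes (Count = 3), then a single i8 load/store pair below
    // for the final byte.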
4862 unsigned Offset = Size - BytesLeft; 4863 SDOperand DstAddr = Dest; 4864 MVT::ValueType DstVT = DstAddr.getValueType(); 4865 SDOperand SrcAddr = Source; 4866 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4867 SDOperand Value; 4868 if (BytesLeft >= 4) { 4869 Value = DAG.getLoad(MVT::i32, Chain, 4870 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4871 DAG.getConstant(Offset, SrcVT)), 4872 NULL, 0); 4873 Chain = Value.getValue(1); 4874 Chain = DAG.getStore(Chain, Value, 4875 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4876 DAG.getConstant(Offset, DstVT)), 4877 NULL, 0); 4878 BytesLeft -= 4; 4879 Offset += 4; 4880 } 4881 if (BytesLeft >= 2) { 4882 Value = DAG.getLoad(MVT::i16, Chain, 4883 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4884 DAG.getConstant(Offset, SrcVT)), 4885 NULL, 0); 4886 Chain = Value.getValue(1); 4887 Chain = DAG.getStore(Chain, Value, 4888 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4889 DAG.getConstant(Offset, DstVT)), 4890 NULL, 0); 4891 BytesLeft -= 2; 4892 Offset += 2; 4893 } 4894 4895 if (BytesLeft == 1) { 4896 Value = DAG.getLoad(MVT::i8, Chain, 4897 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4898 DAG.getConstant(Offset, SrcVT)), 4899 NULL, 0); 4900 Chain = Value.getValue(1); 4901 Chain = DAG.getStore(Chain, Value, 4902 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4903 DAG.getConstant(Offset, DstVT)), 4904 NULL, 0); 4905 } 4906 } 4907 4908 return Chain; 4909} 4910 4911/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4912SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4913 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4914 SDOperand TheChain = N->getOperand(0); 4915 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4916 if (Subtarget->is64Bit()) { 4917 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4918 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4919 MVT::i64, rax.getValue(2)); 4920 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4921 DAG.getConstant(32, MVT::i8)); 4922 SDOperand Ops[] = { 4923 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4924 }; 4925 4926 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4927 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4928 } 4929 4930 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4931 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4932 MVT::i32, eax.getValue(2)); 4933 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4934 SDOperand Ops[] = { eax, edx }; 4935 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4936 4937 // Use a MERGE_VALUES to return the value and chain. 4938 Ops[1] = edx.getValue(1); 4939 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4940 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4941} 4942 4943SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4944 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4945 4946 if (!Subtarget->is64Bit()) { 4947 // vastart just stores the address of the VarArgsFrameIndex slot into the 4948 // memory location argument. 4949 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4950 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4951 } 4952 4953 // __va_list_tag: 4954 // gp_offset (0 - 6 * 8) 4955 // fp_offset (48 - 48 + 8 * 16) 4956 // overflow_arg_area (point to parameters coming in memory). 
4957 // reg_save_area 4958 SmallVector<SDOperand, 8> MemOps; 4959 SDOperand FIN = Op.getOperand(1); 4960 // Store gp_offset 4961 SDOperand Store = DAG.getStore(Op.getOperand(0), 4962 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4963 FIN, SV, 0); 4964 MemOps.push_back(Store); 4965 4966 // Store fp_offset 4967 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4968 Store = DAG.getStore(Op.getOperand(0), 4969 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4970 FIN, SV, 0); 4971 MemOps.push_back(Store); 4972 4973 // Store ptr to overflow_arg_area 4974 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4975 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4976 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4977 MemOps.push_back(Store); 4978 4979 // Store ptr to reg_save_area. 4980 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4981 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4982 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4983 MemOps.push_back(Store); 4984 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4985} 4986 4987SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4988 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4989 SDOperand Chain = Op.getOperand(0); 4990 SDOperand DstPtr = Op.getOperand(1); 4991 SDOperand SrcPtr = Op.getOperand(2); 4992 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4993 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4994 4995 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 4996 Chain = SrcPtr.getValue(1); 4997 for (unsigned i = 0; i < 3; ++i) { 4998 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 4999 Chain = Val.getValue(1); 5000 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 5001 if (i == 2) 5002 break; 5003 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 5004 DAG.getIntPtrConstant(8)); 5005 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 5006 DAG.getIntPtrConstant(8)); 5007 } 5008 return Chain; 5009} 5010 5011SDOperand 5012X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 5013 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 5014 switch (IntNo) { 5015 default: return SDOperand(); // Don't custom lower most intrinsics. 5016 // Comparison intrinsics. 
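  // Each case below becomes a flag-producing X86ISD::COMI or X86ISD::UCOMI
  // compare plus an X86ISD::SETCC, any-extended back to the intrinsic's i32
  // result (see the code after the case list).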
5017 case Intrinsic::x86_sse_comieq_ss: 5018 case Intrinsic::x86_sse_comilt_ss: 5019 case Intrinsic::x86_sse_comile_ss: 5020 case Intrinsic::x86_sse_comigt_ss: 5021 case Intrinsic::x86_sse_comige_ss: 5022 case Intrinsic::x86_sse_comineq_ss: 5023 case Intrinsic::x86_sse_ucomieq_ss: 5024 case Intrinsic::x86_sse_ucomilt_ss: 5025 case Intrinsic::x86_sse_ucomile_ss: 5026 case Intrinsic::x86_sse_ucomigt_ss: 5027 case Intrinsic::x86_sse_ucomige_ss: 5028 case Intrinsic::x86_sse_ucomineq_ss: 5029 case Intrinsic::x86_sse2_comieq_sd: 5030 case Intrinsic::x86_sse2_comilt_sd: 5031 case Intrinsic::x86_sse2_comile_sd: 5032 case Intrinsic::x86_sse2_comigt_sd: 5033 case Intrinsic::x86_sse2_comige_sd: 5034 case Intrinsic::x86_sse2_comineq_sd: 5035 case Intrinsic::x86_sse2_ucomieq_sd: 5036 case Intrinsic::x86_sse2_ucomilt_sd: 5037 case Intrinsic::x86_sse2_ucomile_sd: 5038 case Intrinsic::x86_sse2_ucomigt_sd: 5039 case Intrinsic::x86_sse2_ucomige_sd: 5040 case Intrinsic::x86_sse2_ucomineq_sd: { 5041 unsigned Opc = 0; 5042 ISD::CondCode CC = ISD::SETCC_INVALID; 5043 switch (IntNo) { 5044 default: break; 5045 case Intrinsic::x86_sse_comieq_ss: 5046 case Intrinsic::x86_sse2_comieq_sd: 5047 Opc = X86ISD::COMI; 5048 CC = ISD::SETEQ; 5049 break; 5050 case Intrinsic::x86_sse_comilt_ss: 5051 case Intrinsic::x86_sse2_comilt_sd: 5052 Opc = X86ISD::COMI; 5053 CC = ISD::SETLT; 5054 break; 5055 case Intrinsic::x86_sse_comile_ss: 5056 case Intrinsic::x86_sse2_comile_sd: 5057 Opc = X86ISD::COMI; 5058 CC = ISD::SETLE; 5059 break; 5060 case Intrinsic::x86_sse_comigt_ss: 5061 case Intrinsic::x86_sse2_comigt_sd: 5062 Opc = X86ISD::COMI; 5063 CC = ISD::SETGT; 5064 break; 5065 case Intrinsic::x86_sse_comige_ss: 5066 case Intrinsic::x86_sse2_comige_sd: 5067 Opc = X86ISD::COMI; 5068 CC = ISD::SETGE; 5069 break; 5070 case Intrinsic::x86_sse_comineq_ss: 5071 case Intrinsic::x86_sse2_comineq_sd: 5072 Opc = X86ISD::COMI; 5073 CC = ISD::SETNE; 5074 break; 5075 case Intrinsic::x86_sse_ucomieq_ss: 5076 case Intrinsic::x86_sse2_ucomieq_sd: 5077 Opc = X86ISD::UCOMI; 5078 CC = ISD::SETEQ; 5079 break; 5080 case Intrinsic::x86_sse_ucomilt_ss: 5081 case Intrinsic::x86_sse2_ucomilt_sd: 5082 Opc = X86ISD::UCOMI; 5083 CC = ISD::SETLT; 5084 break; 5085 case Intrinsic::x86_sse_ucomile_ss: 5086 case Intrinsic::x86_sse2_ucomile_sd: 5087 Opc = X86ISD::UCOMI; 5088 CC = ISD::SETLE; 5089 break; 5090 case Intrinsic::x86_sse_ucomigt_ss: 5091 case Intrinsic::x86_sse2_ucomigt_sd: 5092 Opc = X86ISD::UCOMI; 5093 CC = ISD::SETGT; 5094 break; 5095 case Intrinsic::x86_sse_ucomige_ss: 5096 case Intrinsic::x86_sse2_ucomige_sd: 5097 Opc = X86ISD::UCOMI; 5098 CC = ISD::SETGE; 5099 break; 5100 case Intrinsic::x86_sse_ucomineq_ss: 5101 case Intrinsic::x86_sse2_ucomineq_sd: 5102 Opc = X86ISD::UCOMI; 5103 CC = ISD::SETNE; 5104 break; 5105 } 5106 5107 unsigned X86CC; 5108 SDOperand LHS = Op.getOperand(1); 5109 SDOperand RHS = Op.getOperand(2); 5110 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5111 5112 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5113 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5114 DAG.getConstant(X86CC, MVT::i8), Cond); 5115 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5116 } 5117 } 5118} 5119 5120SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5121 // Depths > 0 not supported yet! 
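  // Returning the empty SDOperand makes the legalizer fall back to its
  // default expansion for deeper frames.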
5122 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5123 return SDOperand(); 5124 5125 // Just load the return address 5126 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5127 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5128} 5129 5130SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5131 // Depths > 0 not supported yet! 5132 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5133 return SDOperand(); 5134 5135 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5136 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5137 DAG.getIntPtrConstant(4)); 5138} 5139 5140SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5141 SelectionDAG &DAG) { 5142 // Is not yet supported on x86-64 5143 if (Subtarget->is64Bit()) 5144 return SDOperand(); 5145 5146 return DAG.getIntPtrConstant(8); 5147} 5148 5149SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5150{ 5151 assert(!Subtarget->is64Bit() && 5152 "Lowering of eh_return builtin is not supported yet on x86-64"); 5153 5154 MachineFunction &MF = DAG.getMachineFunction(); 5155 SDOperand Chain = Op.getOperand(0); 5156 SDOperand Offset = Op.getOperand(1); 5157 SDOperand Handler = Op.getOperand(2); 5158 5159 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5160 getPointerTy()); 5161 5162 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5163 DAG.getIntPtrConstant(-4UL)); 5164 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5165 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5166 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5167 MF.getRegInfo().addLiveOut(X86::ECX); 5168 5169 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5170 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5171} 5172 5173SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5174 SelectionDAG &DAG) { 5175 SDOperand Root = Op.getOperand(0); 5176 SDOperand Trmp = Op.getOperand(1); // trampoline 5177 SDOperand FPtr = Op.getOperand(2); // nested function 5178 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5179 5180 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5181 5182 const X86InstrInfo *TII = 5183 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5184 5185 if (Subtarget->is64Bit()) { 5186 SDOperand OutChains[6]; 5187 5188 // Large code-model. 5189 5190 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5191 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5192 5193 const unsigned char N86R10 = 5194 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5195 const unsigned char N86R11 = 5196 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5197 5198 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5199 5200 // Load the pointer to the nested function into R11. 5201 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5202 SDOperand Addr = Trmp; 5203 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5204 TrmpAddr, 0); 5205 5206 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5207 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5208 5209 // Load the 'nest' parameter value into R10. 
5210 // R10 is specified in X86CallingConv.td 5211 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5212 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5213 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5214 TrmpAddr, 10); 5215 5216 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5217 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5218 5219 // Jump to the nested function. 5220 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5221 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5222 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5223 TrmpAddr, 20); 5224 5225 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5226 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5227 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5228 TrmpAddr, 22); 5229 5230 SDOperand Ops[] = 5231 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5232 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5233 } else { 5234 const Function *Func = 5235 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5236 unsigned CC = Func->getCallingConv(); 5237 unsigned NestReg; 5238 5239 switch (CC) { 5240 default: 5241 assert(0 && "Unsupported calling convention"); 5242 case CallingConv::C: 5243 case CallingConv::X86_StdCall: { 5244 // Pass 'nest' parameter in ECX. 5245 // Must be kept in sync with X86CallingConv.td 5246 NestReg = X86::ECX; 5247 5248 // Check that ECX wasn't needed by an 'inreg' parameter. 5249 const FunctionType *FTy = Func->getFunctionType(); 5250 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5251 5252 if (Attrs && !Func->isVarArg()) { 5253 unsigned InRegCount = 0; 5254 unsigned Idx = 1; 5255 5256 for (FunctionType::param_iterator I = FTy->param_begin(), 5257 E = FTy->param_end(); I != E; ++I, ++Idx) 5258 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5259 // FIXME: should only count parameters that are lowered to integers. 5260 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5261 5262 if (InRegCount > 2) { 5263 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5264 abort(); 5265 } 5266 } 5267 break; 5268 } 5269 case CallingConv::X86_FastCall: 5270 // Pass 'nest' parameter in EAX. 
5271 // Must be kept in sync with X86CallingConv.td 5272 NestReg = X86::EAX; 5273 break; 5274 } 5275 5276 SDOperand OutChains[4]; 5277 SDOperand Addr, Disp; 5278 5279 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5280 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5281 5282 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5283 const unsigned char N86Reg = 5284 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg); 5285 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5286 Trmp, TrmpAddr, 0); 5287 5288 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5289 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1); 5290 5291 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5292 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5293 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5294 TrmpAddr, 5, false, 1); 5295 5296 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5297 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); 5298 5299 SDOperand Ops[] = 5300 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5301 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5302 } 5303} 5304 5305SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 5306 /* 5307 The rounding mode is in bits 11:10 of FPSR, and has the following 5308 settings: 5309 00 Round to nearest 5310 01 Round to -inf 5311 10 Round to +inf 5312 11 Round to 0 5313 5314 FLT_ROUNDS, on the other hand, expects the following: 5315 -1 Undefined 5316 0 Round to 0 5317 1 Round to nearest 5318 2 Round to +inf 5319 3 Round to -inf 5320 5321 To perform the conversion, we do: 5322 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5323 */ 5324 5325 MachineFunction &MF = DAG.getMachineFunction(); 5326 const TargetMachine &TM = MF.getTarget(); 5327 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5328 unsigned StackAlignment = TFI.getStackAlignment(); 5329 MVT::ValueType VT = Op.getValueType(); 5330 5331 // Save FP Control Word to stack slot 5332 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5333 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5334 5335 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5336 DAG.getEntryNode(), StackSlot); 5337 5338 // Load FP Control Word from stack slot 5339 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5340 5341 // Transform as necessary 5342 SDOperand CWD1 = 5343 DAG.getNode(ISD::SRL, MVT::i16, 5344 DAG.getNode(ISD::AND, MVT::i16, 5345 CWD, DAG.getConstant(0x800, MVT::i16)), 5346 DAG.getConstant(11, MVT::i8)); 5347 SDOperand CWD2 = 5348 DAG.getNode(ISD::SRL, MVT::i16, 5349 DAG.getNode(ISD::AND, MVT::i16, 5350 CWD, DAG.getConstant(0x400, MVT::i16)), 5351 DAG.getConstant(9, MVT::i8)); 5352 5353 SDOperand RetVal = 5354 DAG.getNode(ISD::AND, MVT::i16, 5355 DAG.getNode(ISD::ADD, MVT::i16, 5356 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5357 DAG.getConstant(1, MVT::i16)), 5358 DAG.getConstant(3, MVT::i16)); 5359 5360 5361 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 
5362 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5363} 5364 5365SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5366 MVT::ValueType VT = Op.getValueType(); 5367 MVT::ValueType OpVT = VT; 5368 unsigned NumBits = MVT::getSizeInBits(VT); 5369 5370 Op = Op.getOperand(0); 5371 if (VT == MVT::i8) { 5372 // Zero extend to i32 since there is not an i8 bsr. 5373 OpVT = MVT::i32; 5374 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5375 } 5376 5377 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5378 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5379 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5380 5381 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5382 SmallVector<SDOperand, 4> Ops; 5383 Ops.push_back(Op); 5384 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5385 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5386 Ops.push_back(Op.getValue(1)); 5387 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5388 5389 // Finally xor with NumBits-1. 5390 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5391 5392 if (VT == MVT::i8) 5393 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5394 return Op; 5395} 5396 5397SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5398 MVT::ValueType VT = Op.getValueType(); 5399 MVT::ValueType OpVT = VT; 5400 unsigned NumBits = MVT::getSizeInBits(VT); 5401 5402 Op = Op.getOperand(0); 5403 if (VT == MVT::i8) { 5404 OpVT = MVT::i32; 5405 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5406 } 5407 5408 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5409 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5410 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5411 5412 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5413 SmallVector<SDOperand, 4> Ops; 5414 Ops.push_back(Op); 5415 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5416 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5417 Ops.push_back(Op.getValue(1)); 5418 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5419 5420 if (VT == MVT::i8) 5421 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5422 return Op; 5423} 5424 5425SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { 5426 MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT(); 5427 unsigned Reg = 0; 5428 unsigned size = 0; 5429 switch(T) { 5430 case MVT::i8: Reg = X86::AL; size = 1; break; 5431 case MVT::i16: Reg = X86::AX; size = 2; break; 5432 case MVT::i32: Reg = X86::EAX; size = 4; break; 5433 case MVT::i64: 5434 if (Subtarget->is64Bit()) { 5435 Reg = X86::RAX; size = 8; 5436 } else //Should go away when LowerType stuff lands 5437 return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); 5438 break; 5439 }; 5440 SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, 5441 Op.getOperand(3), SDOperand()); 5442 SDOperand Ops[] = { cpIn.getValue(0), 5443 Op.getOperand(1), 5444 Op.getOperand(2), 5445 DAG.getTargetConstant(size, MVT::i8), 5446 cpIn.getValue(1) }; 5447 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5448 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); 5449 SDOperand cpOut = 5450 DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); 5451 return cpOut; 5452} 5453 5454SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { 5455 MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT(); 5456 assert (T == MVT::i64 && "Only know how to expand i64 CAS"); 5457 SDOperand cpInL, cpInH; 5458 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5459 DAG.getConstant(0, 
MVT::i32)); 5460 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), 5461 DAG.getConstant(1, MVT::i32)); 5462 cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, 5463 cpInL, SDOperand()); 5464 cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, 5465 cpInH, cpInL.getValue(1)); 5466 SDOperand swapInL, swapInH; 5467 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5468 DAG.getConstant(0, MVT::i32)); 5469 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), 5470 DAG.getConstant(1, MVT::i32)); 5471 swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX, 5472 swapInL, cpInH.getValue(1)); 5473 swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, 5474 swapInH, swapInL.getValue(1)); 5475 SDOperand Ops[] = { swapInH.getValue(0), 5476 Op->getOperand(1), 5477 swapInH.getValue(1)}; 5478 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 5479 SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); 5480 SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, 5481 Result.getValue(1)); 5482 SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, 5483 cpOutL.getValue(2)); 5484 SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 5485 SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); 5486 Tys = DAG.getVTList(MVT::i64, MVT::Other); 5487 return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; 5488} 5489 5490/// LowerOperation - Provide custom lowering hooks for some operations. 5491/// 5492SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5493 switch (Op.getOpcode()) { 5494 default: assert(0 && "Should not custom lower this!"); 5495 case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); 5496 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5497 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5498 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5499 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5500 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5501 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5502 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5503 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5504 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5505 case ISD::SHL_PARTS: 5506 case ISD::SRA_PARTS: 5507 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5508 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5509 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5510 case ISD::FABS: return LowerFABS(Op, DAG); 5511 case ISD::FNEG: return LowerFNEG(Op, DAG); 5512 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5513 case ISD::SETCC: return LowerSETCC(Op, DAG); 5514 case ISD::SELECT: return LowerSELECT(Op, DAG); 5515 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5516 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5517 case ISD::CALL: return LowerCALL(Op, DAG); 5518 case ISD::RET: return LowerRET(Op, DAG); 5519 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5520 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5521 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5522 case ISD::VASTART: return LowerVASTART(Op, DAG); 5523 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5524 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5525 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5526 case ISD::FRAMEADDR: 
return LowerFRAMEADDR(Op, DAG); 5527 case ISD::FRAME_TO_ARGS_OFFSET: 5528 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5529 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5530 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5531 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5532 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5533 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5534 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5535 5536 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5537 case ISD::READCYCLECOUNTER: 5538 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5539 } 5540} 5541 5542/// ExpandOperation - Provide custom lowering hooks for expanding operations. 5543SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5544 switch (N->getOpcode()) { 5545 default: assert(0 && "Should not custom lower this!"); 5546 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5547 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5548 case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); 5549 } 5550} 5551 5552const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5553 switch (Opcode) { 5554 default: return NULL; 5555 case X86ISD::BSF: return "X86ISD::BSF"; 5556 case X86ISD::BSR: return "X86ISD::BSR"; 5557 case X86ISD::SHLD: return "X86ISD::SHLD"; 5558 case X86ISD::SHRD: return "X86ISD::SHRD"; 5559 case X86ISD::FAND: return "X86ISD::FAND"; 5560 case X86ISD::FOR: return "X86ISD::FOR"; 5561 case X86ISD::FXOR: return "X86ISD::FXOR"; 5562 case X86ISD::FSRL: return "X86ISD::FSRL"; 5563 case X86ISD::FILD: return "X86ISD::FILD"; 5564 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5565 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5566 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5567 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5568 case X86ISD::FLD: return "X86ISD::FLD"; 5569 case X86ISD::FST: return "X86ISD::FST"; 5570 case X86ISD::FP_GET_ST0: return "X86ISD::FP_GET_ST0"; 5571 case X86ISD::FP_GET_ST0_ST1: return "X86ISD::FP_GET_ST0_ST1"; 5572 case X86ISD::FP_SET_ST0: return "X86ISD::FP_SET_ST0"; 5573 case X86ISD::CALL: return "X86ISD::CALL"; 5574 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5575 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5576 case X86ISD::CMP: return "X86ISD::CMP"; 5577 case X86ISD::COMI: return "X86ISD::COMI"; 5578 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5579 case X86ISD::SETCC: return "X86ISD::SETCC"; 5580 case X86ISD::CMOV: return "X86ISD::CMOV"; 5581 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5582 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5583 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5584 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5585 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5586 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5587 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 5588 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5589 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 5590 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 5591 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5592 case X86ISD::FMAX: return "X86ISD::FMAX"; 5593 case X86ISD::FMIN: return "X86ISD::FMIN"; 5594 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5595 case X86ISD::FRCP: return "X86ISD::FRCP"; 5596 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5597 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5598 case X86ISD::EH_RETURN: return 
"X86ISD::EH_RETURN"; 5599 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5600 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5601 case X86ISD::LCMPXCHG_DAG: return "x86ISD::LCMPXCHG_DAG"; 5602 case X86ISD::LCMPXCHG8_DAG: return "x86ISD::LCMPXCHG8_DAG"; 5603 } 5604} 5605 5606// isLegalAddressingMode - Return true if the addressing mode represented 5607// by AM is legal for this target, for a load/store of the specified type. 5608bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5609 const Type *Ty) const { 5610 // X86 supports extremely general addressing modes. 5611 5612 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5613 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5614 return false; 5615 5616 if (AM.BaseGV) { 5617 // We can only fold this if we don't need an extra load. 5618 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5619 return false; 5620 5621 // X86-64 only supports addr of globals in small code model. 5622 if (Subtarget->is64Bit()) { 5623 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5624 return false; 5625 // If lower 4G is not available, then we must use rip-relative addressing. 5626 if (AM.BaseOffs || AM.Scale > 1) 5627 return false; 5628 } 5629 } 5630 5631 switch (AM.Scale) { 5632 case 0: 5633 case 1: 5634 case 2: 5635 case 4: 5636 case 8: 5637 // These scales always work. 5638 break; 5639 case 3: 5640 case 5: 5641 case 9: 5642 // These scales are formed with basereg+scalereg. Only accept if there is 5643 // no basereg yet. 5644 if (AM.HasBaseReg) 5645 return false; 5646 break; 5647 default: // Other stuff never works. 5648 return false; 5649 } 5650 5651 return true; 5652} 5653 5654 5655bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5656 if (!Ty1->isInteger() || !Ty2->isInteger()) 5657 return false; 5658 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5659 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5660 if (NumBits1 <= NumBits2) 5661 return false; 5662 return Subtarget->is64Bit() || NumBits1 < 64; 5663} 5664 5665bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5666 MVT::ValueType VT2) const { 5667 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5668 return false; 5669 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5670 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5671 if (NumBits1 <= NumBits2) 5672 return false; 5673 return Subtarget->is64Bit() || NumBits1 < 64; 5674} 5675 5676/// isShuffleMaskLegal - Targets can use this to indicate that they only 5677/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5678/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5679/// are assumed to be legal. 5680bool 5681X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5682 // Only do shuffles on 128-bit vector types for now. 
5683 if (MVT::getSizeInBits(VT) == 64) return false; 5684 return (Mask.Val->getNumOperands() <= 4 || 5685 isIdentityMask(Mask.Val) || 5686 isIdentityMask(Mask.Val, true) || 5687 isSplatMask(Mask.Val) || 5688 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5689 X86::isUNPCKLMask(Mask.Val) || 5690 X86::isUNPCKHMask(Mask.Val) || 5691 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5692 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5693} 5694 5695bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5696 MVT::ValueType EVT, 5697 SelectionDAG &DAG) const { 5698 unsigned NumElts = BVOps.size(); 5699 // Only do shuffles on 128-bit vector types for now. 5700 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5701 if (NumElts == 2) return true; 5702 if (NumElts == 4) { 5703 return (isMOVLMask(&BVOps[0], 4) || 5704 isCommutedMOVL(&BVOps[0], 4, true) || 5705 isSHUFPMask(&BVOps[0], 4) || 5706 isCommutedSHUFP(&BVOps[0], 4)); 5707 } 5708 return false; 5709} 5710 5711//===----------------------------------------------------------------------===// 5712// X86 Scheduler Hooks 5713//===----------------------------------------------------------------------===// 5714 5715MachineBasicBlock * 5716X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5717 MachineBasicBlock *BB) { 5718 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5719 switch (MI->getOpcode()) { 5720 default: assert(false && "Unexpected instr type to insert"); 5721 case X86::CMOV_FR32: 5722 case X86::CMOV_FR64: 5723 case X86::CMOV_V4F32: 5724 case X86::CMOV_V2F64: 5725 case X86::CMOV_V2I64: { 5726 // To "insert" a SELECT_CC instruction, we actually have to insert the 5727 // diamond control-flow pattern. The incoming instruction knows the 5728 // destination vreg to set, the condition code register to branch on, the 5729 // true/false values to select between, and a branch opcode to use. 5730 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5731 ilist<MachineBasicBlock>::iterator It = BB; 5732 ++It; 5733 5734 // thisMBB: 5735 // ... 5736 // TrueVal = ... 5737 // cmpTY ccX, r1, r2 5738 // bCC copy1MBB 5739 // fallthrough --> copy0MBB 5740 MachineBasicBlock *thisMBB = BB; 5741 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5742 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5743 unsigned Opc = 5744 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5745 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5746 MachineFunction *F = BB->getParent(); 5747 F->getBasicBlockList().insert(It, copy0MBB); 5748 F->getBasicBlockList().insert(It, sinkMBB); 5749 // Update machine-CFG edges by first adding all successors of the current 5750 // block to the new block which will contain the Phi node for the select. 5751 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5752 e = BB->succ_end(); i != e; ++i) 5753 sinkMBB->addSuccessor(*i); 5754 // Next, remove all successors of the current block, and add the true 5755 // and fallthrough blocks as its successors. 5756 while(!BB->succ_empty()) 5757 BB->removeSuccessor(BB->succ_begin()); 5758 BB->addSuccessor(copy0MBB); 5759 BB->addSuccessor(sinkMBB); 5760 5761 // copy0MBB: 5762 // %FalseValue = ... 5763 // # fallthrough to sinkMBB 5764 BB = copy0MBB; 5765 5766 // Update machine-CFG edges 5767 BB->addSuccessor(sinkMBB); 5768 5769 // sinkMBB: 5770 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5771 // ... 
5772 BB = sinkMBB; 5773 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5774 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5775 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5776 5777 delete MI; // The pseudo instruction is gone now. 5778 return BB; 5779 } 5780 5781 case X86::FP32_TO_INT16_IN_MEM: 5782 case X86::FP32_TO_INT32_IN_MEM: 5783 case X86::FP32_TO_INT64_IN_MEM: 5784 case X86::FP64_TO_INT16_IN_MEM: 5785 case X86::FP64_TO_INT32_IN_MEM: 5786 case X86::FP64_TO_INT64_IN_MEM: 5787 case X86::FP80_TO_INT16_IN_MEM: 5788 case X86::FP80_TO_INT32_IN_MEM: 5789 case X86::FP80_TO_INT64_IN_MEM: { 5790 // Change the floating point control register to use "round towards zero" 5791 // mode when truncating to an integer value. 5792 MachineFunction *F = BB->getParent(); 5793 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5794 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5795 5796 // Load the old value of the high byte of the control word... 5797 unsigned OldCW = 5798 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5799 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5800 5801 // Set the high part to be round to zero... 5802 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5803 .addImm(0xC7F); 5804 5805 // Reload the modified control word now... 5806 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5807 5808 // Restore the memory image of control word to original value 5809 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5810 .addReg(OldCW); 5811 5812 // Get the X86 opcode to use. 5813 unsigned Opc; 5814 switch (MI->getOpcode()) { 5815 default: assert(0 && "illegal opcode!"); 5816 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5817 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5818 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5819 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5820 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5821 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5822 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5823 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5824 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5825 } 5826 5827 X86AddressMode AM; 5828 MachineOperand &Op = MI->getOperand(0); 5829 if (Op.isRegister()) { 5830 AM.BaseType = X86AddressMode::RegBase; 5831 AM.Base.Reg = Op.getReg(); 5832 } else { 5833 AM.BaseType = X86AddressMode::FrameIndexBase; 5834 AM.Base.FrameIndex = Op.getIndex(); 5835 } 5836 Op = MI->getOperand(1); 5837 if (Op.isImmediate()) 5838 AM.Scale = Op.getImm(); 5839 Op = MI->getOperand(2); 5840 if (Op.isImmediate()) 5841 AM.IndexReg = Op.getImm(); 5842 Op = MI->getOperand(3); 5843 if (Op.isGlobalAddress()) { 5844 AM.GV = Op.getGlobal(); 5845 } else { 5846 AM.Disp = Op.getImm(); 5847 } 5848 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5849 .addReg(MI->getOperand(4).getReg()); 5850 5851 // Reload the original control word now. 5852 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5853 5854 delete MI; // The pseudo instruction is gone now. 
5855 return BB; 5856 } 5857 } 5858} 5859 5860//===----------------------------------------------------------------------===// 5861// X86 Optimization Hooks 5862//===----------------------------------------------------------------------===// 5863 5864void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5865 const APInt &Mask, 5866 APInt &KnownZero, 5867 APInt &KnownOne, 5868 const SelectionDAG &DAG, 5869 unsigned Depth) const { 5870 unsigned Opc = Op.getOpcode(); 5871 assert((Opc >= ISD::BUILTIN_OP_END || 5872 Opc == ISD::INTRINSIC_WO_CHAIN || 5873 Opc == ISD::INTRINSIC_W_CHAIN || 5874 Opc == ISD::INTRINSIC_VOID) && 5875 "Should use MaskedValueIsZero if you don't know whether Op" 5876 " is a target node!"); 5877 5878 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 5879 switch (Opc) { 5880 default: break; 5881 case X86ISD::SETCC: 5882 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 5883 Mask.getBitWidth() - 1); 5884 break; 5885 } 5886} 5887 5888/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5889/// element of the result of the vector shuffle. 5890static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5891 MVT::ValueType VT = N->getValueType(0); 5892 SDOperand PermMask = N->getOperand(2); 5893 unsigned NumElems = PermMask.getNumOperands(); 5894 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5895 i %= NumElems; 5896 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5897 return (i == 0) 5898 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5899 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5900 SDOperand Idx = PermMask.getOperand(i); 5901 if (Idx.getOpcode() == ISD::UNDEF) 5902 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5903 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5904 } 5905 return SDOperand(); 5906} 5907 5908/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5909/// node is a GlobalAddress + an offset. 5910static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5911 unsigned Opc = N->getOpcode(); 5912 if (Opc == X86ISD::Wrapper) { 5913 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5914 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5915 return true; 5916 } 5917 } else if (Opc == ISD::ADD) { 5918 SDOperand N1 = N->getOperand(0); 5919 SDOperand N2 = N->getOperand(1); 5920 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5921 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5922 if (V) { 5923 Offset += V->getSignExtended(); 5924 return true; 5925 } 5926 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5927 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5928 if (V) { 5929 Offset += V->getSignExtended(); 5930 return true; 5931 } 5932 } 5933 } 5934 return false; 5935} 5936 5937/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5938/// + Dist * Size. 
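/// Both loads must hang off the same chain, and their addresses must either
/// be frame indices of matching object size or offsets from a common global.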
5939static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5940 MachineFrameInfo *MFI) { 5941 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5942 return false; 5943 5944 SDOperand Loc = N->getOperand(1); 5945 SDOperand BaseLoc = Base->getOperand(1); 5946 if (Loc.getOpcode() == ISD::FrameIndex) { 5947 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5948 return false; 5949 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5950 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5951 int FS = MFI->getObjectSize(FI); 5952 int BFS = MFI->getObjectSize(BFI); 5953 if (FS != BFS || FS != Size) return false; 5954 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5955 } else { 5956 GlobalValue *GV1 = NULL; 5957 GlobalValue *GV2 = NULL; 5958 int64_t Offset1 = 0; 5959 int64_t Offset2 = 0; 5960 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5961 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5962 if (isGA1 && isGA2 && GV1 == GV2) 5963 return Offset1 == (Offset2 + Dist*Size); 5964 } 5965 5966 return false; 5967} 5968 5969static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5970 const X86Subtarget *Subtarget) { 5971 GlobalValue *GV; 5972 int64_t Offset = 0; 5973 if (isGAPlusOffset(Base, GV, Offset)) 5974 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5975 // DAG combine handles the stack object case. 5976 return false; 5977} 5978 5979 5980/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5981/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5982/// if the load addresses are consecutive, non-overlapping, and in the right 5983/// order. 5984static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5985 const X86Subtarget *Subtarget) { 5986 MachineFunction &MF = DAG.getMachineFunction(); 5987 MachineFrameInfo *MFI = MF.getFrameInfo(); 5988 MVT::ValueType VT = N->getValueType(0); 5989 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5990 SDOperand PermMask = N->getOperand(2); 5991 int NumElems = (int)PermMask.getNumOperands(); 5992 SDNode *Base = NULL; 5993 for (int i = 0; i < NumElems; ++i) { 5994 SDOperand Idx = PermMask.getOperand(i); 5995 if (Idx.getOpcode() == ISD::UNDEF) { 5996 if (!Base) return SDOperand(); 5997 } else { 5998 SDOperand Arg = 5999 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 6000 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 6001 return SDOperand(); 6002 if (!Base) 6003 Base = Arg.Val; 6004 else if (!isConsecutiveLoad(Arg.Val, Base, 6005 i, MVT::getSizeInBits(EVT)/8,MFI)) 6006 return SDOperand(); 6007 } 6008 } 6009 6010 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 6011 LoadSDNode *LD = cast<LoadSDNode>(Base); 6012 if (isAlign16) { 6013 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6014 LD->getSrcValueOffset(), LD->isVolatile()); 6015 } else { 6016 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 6017 LD->getSrcValueOffset(), LD->isVolatile(), 6018 LD->getAlignment()); 6019 } 6020} 6021 6022/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 6023static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 6024 const X86Subtarget *Subtarget) { 6025 SDOperand Cond = N->getOperand(0); 6026 6027 // If we have SSE[12] support, try to form min/max nodes. 
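  // For example, (x < y) ? x : y on f32/f64 becomes X86ISD::FMIN below; some
  // of the orderings are only safe to fold when UnsafeFPMath is enabled.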
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE:  // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT:  // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT:  // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.
  if (MVT::isVector(St->getValue().getValueType()) &&
      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode *LdVal = St->getValue().Val;
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDOperand, 8> Ops;
    SDNode *ChainVal = St->getChain().Val;
    // Must be a store of a load. We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor. It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).Val == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
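      // Illustration (not from the original source): a v1i64 copy emitted
      // as an MMX load/store pair, e.g.
      //   movq (%eax), %mm0
      //   movq %mm0, (%ecx)
      // becomes a single GPR movq on x86-64, so no MMX register is touched
      // and no emms is required:
      //   movq (%rsi), %rax
      //   movq %rax, (%rdi)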
      if (Subtarget->is64Bit()) {
        SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                      Ld->getSrcValueOffset(), Ld->isVolatile(),
                                      Ld->getAlignment());
        SDOperand NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewLd);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDOperand LoAddr = Ld->getBasePtr();
      SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                     DAG.getConstant(4, MVT::i32));

      SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                   Ld->isVolatile(), Ld->getAlignment());
      SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                   Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                   Ld->isVolatile(),
                                   MinAlign(Ld->getAlignment(), 4));

      SDOperand NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                    St->getSrcValue(), St->getSrcValueOffset(),
                                    St->isVolatile(), St->getAlignment());
      SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                    St->getSrcValue(),
                                    St->getSrcValueOffset() + 4,
                                    St->isVolatile(),
                                    MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDOperand();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDOperand();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
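/// FAND is a bitwise AND of floating-point values, so ANDing with +0.0 (the
/// all-zero bit pattern) always yields +0.0. Note that -0.0 does not qualify:
/// its sign bit is set, which is why only isPosZero() constants are folded.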
static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDOperand();
}

SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:
    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// lowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
                                         std::string &s) const {
  if (MVT::isFloatingPoint(ConstraintVT)) {
    if (Subtarget->hasSSE2())
      s = "Y";
    else if (Subtarget->hasSSE1())
      s = "x";
    else
      s = "f";
  } else
    return TargetLowering::lowerXConstraint(ConstraintVT, s);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
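    // For example (illustration): the 42 in asm("..." : : "i"(42)) reaches
    // this point as a plain ConstantSDNode and is accepted directly.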
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset() + C->getValue();
      } else {
        // Try the commuted form, (C+GA).
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset() + C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
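      // Illustration (not from the original source): an operand such as
      //   asm("paddb %1, %0" : "+y"(a) : "y"(b))
      // must be assigned an MMX register, hence VR64 below.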
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
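  // For example, "{ax}" with VT == MVT::i32 is remapped below to EAX in
  // GR32, and with VT == MVT::i8 to AL in GR8.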
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}