X86ISelLowering.cpp revision a844bdeab31ef04221e7ef59a8467893584cc14d
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

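  // A note on the legalize actions used throughout this constructor
  // (standard SelectionDAG semantics, summarized here for convenience):
  //   Legal   - the target natively supports the operation on the type.
  //   Promote - perform the operation in a larger type; e.g. the i1
  //             SEXTLOAD above is done as a wider sign-extending load.
  //   Expand  - let the legalizer rewrite the node into other nodes; e.g.
  //             the truncating stores above become a TRUNCATE plus a store.
  //   Custom  - call this target's LowerOperation hook to build the DAG.
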
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }
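
  // The Promote entries above are sound because every value of the narrow
  // unsigned type fits in the signed range of the wider type: e.g. an
  // f64 -> i16 FP_TO_UINT is performed as an f64 -> i32 FP_TO_SINT and the
  // result is then truncated.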

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer multiply, multiply-high, divide, and remainder are
  // lowered to use operations that produce two results, to match the
  // available instructions. This exposes the two-result form to trivial
  // CSE, which is able to combine x/y and x%y into a single instruction,
  // for example. The single-result multiply instructions are introduced
  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part is
  // not needed.
  setOperationAction(ISD::MUL,   MVT::i8,  Expand);
  setOperationAction(ISD::MULHS, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU, MVT::i8,  Expand);
  setOperationAction(ISD::SDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::UDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::SREM,  MVT::i8,  Expand);
  setOperationAction(ISD::UREM,  MVT::i8,  Expand);
  setOperationAction(ISD::MUL,   MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV,  MVT::i16, Expand);
  setOperationAction(ISD::UDIV,  MVT::i16, Expand);
  setOperationAction(ISD::SREM,  MVT::i16, Expand);
  setOperationAction(ISD::UREM,  MVT::i16, Expand);
  setOperationAction(ISD::MUL,   MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::MUL,   MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV,  MVT::i64, Expand);
  setOperationAction(ISD::UDIV,  MVT::i64, Expand);
  setOperationAction(ISD::SREM,  MVT::i64, Expand);
  setOperationAction(ISD::UREM,  MVT::i64, Expand);
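
  // For example, on x86 one "idivl" produces both results at once (quotient
  // in EAX, remainder in EDX), so
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both legalize to ISD::SDIVREM nodes that CSE then merges into a single
  // division.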

  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
  setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,  Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM,           MVT::f64, Expand);
  setOperationAction(ISD::FLT_ROUNDS_,    MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8,  Expand);
  setOperationAction(ISD::CTTZ,  MVT::i8,  Custom);
  setOperationAction(ISD::CTLZ,  MVT::i8,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i16, Custom);
  setOperationAction(ISD::CTLZ,  MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTLZ,  MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ,  MVT::i64, Custom);
    setOperationAction(ISD::CTLZ,  MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP,            MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC,  MVT::i8,  Custom);
  setOperationAction(ISD::SETCC,  MVT::i16, Custom);
  setOperationAction(ISD::SETCC,  MVT::i32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f64, Custom);
  setOperationAction(ISD::SETCC,  MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC,  MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool,     MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,        MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress,    MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol,   MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool,   MVT::i64, Custom);
    setOperationAction(ISD::JumpTable,      MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress,  MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
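
  // For reference: on 32-bit x86 a 64-bit shift such as "shl i64 %v, %n" is
  // split by the legalizer into an SHL_PARTS node on the two i32 halves,
  // which the custom lowering turns into an shld/shl pair plus a test on
  // bit 5 of the count (the hardware shift count is taken modulo 32).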
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG,   MVT::Other, Expand);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
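
    // Concretely, the Custom FABS/FNEG lowerings build constant-pool bit
    // masks: fabs(x) becomes "andpd x, [0x7fffffffffffffff]" (clear the
    // sign bit) and fneg(x) becomes "xorpd x, [0x8000000000000000]" (flip
    // the sign bit); FCOPYSIGN combines the two masked values with an or.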

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
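
  // Expanding FPOW turns llvm.pow.* intrinsics into ordinary libcalls, e.g.
  //   %r = call double @llvm.pow.f64(double %x, double %y)
  // becomes a call to pow() (powf/powl for the f32/f80 variants).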

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);
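
    // The logical ops below are Promoted to v1i64: a bitwise operation is
    // independent of element layout, so e.g. an AND of two v8i8 values is
    // bitcast to v1i64, performed with a single MMX pand, and bitcast back.
    // AddPromotedToType records the type to promote to.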
    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,  MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }
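
  // With the v4f32 setup above, a vector fadd selects directly to addps,
  // while FNEG, marked Custom, is emitted as an xorps against a vector of
  // sign-bit masks (there is no SSE negate instruction).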

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
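
  // With these thresholds, a small fixed-size memset such as
  //   call void @llvm.memset.i32(i8* %p, i8 0, i32 64, i32 4)
  // is lowered to a short sequence of wide stores rather than a call to the
  // library memset.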
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
    else if (VTy->getBitWidth() == 64)
      if (MaxAlign < 8)
        MaxAlign = 8;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  getMaxByValAlign(Ty, Align);
  return Align;
}
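
// For example, "struct { <4 x float> V; int I; }" contains a 128-bit vector,
// so getMaxByValAlign raises MaxAlign to 16 and the byval argument is placed
// on a 16-byte boundary; a struct of plain ints keeps the 4-byte default.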

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand,8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't really
    // a register.
    SDOperand Value = Op.getOperand(1);

    // If the value is an FP value in an SSE register, it must be moved from
    // an XMM register onto the fp-stack. Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}
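
// For an X86_StdCall function taking 12 bytes of arguments, BytesToPop is 12
// and the emitted epilogue is "ret 12", popping the arguments in the callee.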

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. It returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);

    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back into the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}
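
// Illustrative use of the above: a 16-byte aggregate returned in a register
// pair (e.g. XMM0/XMM1) is copied out as two 64-bit values and stored back
// to the caller-provided sret slot at offsets 0 and 8.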

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT2, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is standard for many Windows API routines.
//  It differs from the C calling convention just a little: the callee should
//  clean up the stack, not the caller. Symbols should also be decorated in
//  some fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

// Determines whether a CALL node uses struct return semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a FORMAL_ARGUMENTS node uses struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a CALL or FORMAL_ARGUMENTS node requires the callee to pop
// its own arguments. Callee pop is necessary to support tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}
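
// Example: an X86_StdCall function on 32-bit x86 returns with "ret N" and
// thus pops its own arguments; a vararg function never can, since the callee
// does not know how many bytes the caller actually pushed.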

// Selects the correct CCAssignFn for a CALL or FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

// Selects the appropriate decoration to apply to a MachineFunction containing
// a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}


// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could possibly
// be overwritten when lowering the outgoing arguments in a tail call. Currently
// the implementation of this call is very conservative and assumes all
// arguments sourced from FORMAL_ARGUMENTS or a CopyFromReg with virtual
// registers would be overwritten by direct lowering.
// Possible improvement:
// Check FORMAL_ARGUMENTS' corresponding MERGE_VALUES for CopyFromReg nodes
// indicating inreg passed arguments which also need not be lowered to a safe
// stack slot.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op) {
  RegisterSDNode *OpReg = NULL;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = cast<RegisterSDNode>(Op.getOperand(1))) &&
       OpReg->getReg() >= MRegisterInfo::FirstVirtualRegister))
    return true;
  return false;
}

// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
// by "Src" to address "Dst" with size and alignment information specified by
// the specific parameter attribute. The copy will be passed as a byval function
// parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          unsigned Flags, SelectionDAG &DAG) {
  unsigned Align = 1 <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}
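
// Worked example of the flag decoding above: for a 24-byte byval argument
// with 8-byte alignment the flag word holds log2(align) in the ByValAlign
// field, so
//   Align = 1 << ((Flags & ByValAlign) >> ByValAlignOffs) = 1 << 3 = 8
//   Size  = (Flags & ByValSize) >> ByValSizeOffs           = 24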

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool isByVal = Flags & ISD::ParamFlags::ByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This
  // can be changed with more analysis.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), !isByVal);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (Is64Bit && RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

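      // For instance, an i8 argument arriving zero-extended in a 32-bit
      // register is modelled as (truncate (AssertZext i32 %reg, i8)), so
      // later combines know the high bits are already zero.
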
      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // align stack specially for tail calls
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

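  // The register save area built above is laid out as 6 GPR slots (48 bytes)
  // followed by 8 XMM slots (128 bytes); VarArgsGPOffset and VarArgsFPOffset
  // point at the first unused slot of each kind, which is where va_arg
  // resumes loading register-passed varargs at runtime.
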
  // Make sure the argument area takes 8n+4 bytes, so that the start of the
  // arguments stays aligned after the return address has been pushed.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;   // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  SDOperand PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
}

/// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64
/// struct return call to the specified function. The X86-64 ABI specifies
/// that some SRet calls are actually returned in registers. Since current
/// LLVM cannot represent multi-value calls, they are represented as calls
/// where the results are passed in a hidden struct provided by the caller.
/// This function examines the type of the struct to determine the correct
/// way to implement the call.
X86::X86_64SRet
X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
  // FIXME: Disabled for now.
1305 return X86::InMemory; 1306 1307 const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType()); 1308 const Type *RTy = PTy->getElementType(); 1309 unsigned Size = getTargetData()->getABITypeSize(RTy); 1310 if (Size != 16 && Size != 32) 1311 return X86::InMemory; 1312 1313 if (Size == 32) { 1314 const StructType *STy = dyn_cast<StructType>(RTy); 1315 if (!STy) return X86::InMemory; 1316 if (STy->getNumElements() == 2 && 1317 STy->getElementType(0) == Type::X86_FP80Ty && 1318 STy->getElementType(1) == Type::X86_FP80Ty) 1319 return X86::InX87; 1320 } 1321 1322 bool AllFP = true; 1323 for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end(); 1324 I != E; ++I) { 1325 const Type *STy = I->get(); 1326 if (!STy->isFPOrFPVector()) { 1327 AllFP = false; 1328 break; 1329 } 1330 } 1331 1332 if (AllFP) 1333 return X86::InSSE; 1334 return X86::InGPR64; 1335} 1336 1337void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall, 1338 CCAssignFn *Fn, 1339 CCState &CCInfo) { 1340 unsigned NumOps = (TheCall->getNumOperands() - 5) / 2; 1341 for (unsigned i = 1; i != NumOps; ++i) { 1342 MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType(); 1343 SDOperand FlagOp = TheCall->getOperand(5+2*i+1); 1344 unsigned ArgFlags =cast<ConstantSDNode>(FlagOp)->getValue(); 1345 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) { 1346 cerr << "Call operand #" << i << " has unhandled type " 1347 << MVT::getValueTypeString(ArgVT) << "\n"; 1348 abort(); 1349 } 1350 } 1351} 1352 1353SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 1354 MachineFunction &MF = DAG.getMachineFunction(); 1355 SDOperand Chain = Op.getOperand(0); 1356 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 1357 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1358 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 1359 && CC == CallingConv::Fast && PerformTailCallOpt; 1360 SDOperand Callee = Op.getOperand(4); 1361 bool Is64Bit = Subtarget->is64Bit(); 1362 bool IsStructRet = CallIsStructReturn(Op); 1363 1364 assert(!(isVarArg && CC == CallingConv::Fast) && 1365 "Var args not supported with calling convention fastcc"); 1366 1367 // Analyze operands of the call, assigning locations to each operand. 1368 SmallVector<CCValAssign, 16> ArgLocs; 1369 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1370 CCAssignFn *CCFn = CCAssignFnForNode(Op); 1371 1372 X86::X86_64SRet SRetMethod = X86::InMemory; 1373 if (Is64Bit && IsStructRet) 1374 // FIXME: We can't figure out type of the sret structure for indirect 1375 // calls. We need to copy more information from CallSite to the ISD::CALL 1376 // node. 1377 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1378 SRetMethod = 1379 ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal())); 1380 1381 // UGLY HACK! For x86-64, some 128-bit aggregates are returns in a pair of 1382 // registers. Unfortunately, llvm does not support i128 yet so we pretend it's 1383 // a sret call. 1384 if (SRetMethod != X86::InMemory) 1385 X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo); 1386 else 1387 CCInfo.AnalyzeCallOperands(Op.Val, CCFn); 1388 1389 // Get a count of how many bytes are to be pushed on the stack. 
1390 unsigned NumBytes = CCInfo.getNextStackOffset(); 1391 if (CC == CallingConv::Fast) 1392 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1393 1394 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1395 // arguments and the arguments after the retaddr has been pushed are aligned. 1396 if (!Is64Bit && CC == CallingConv::X86_FastCall && 1397 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() && 1398 (NumBytes & 7) == 0) 1399 NumBytes += 4; 1400 1401 int FPDiff = 0; 1402 if (IsTailCall) { 1403 // Lower arguments at fp - stackoffset + fpdiff. 1404 unsigned NumBytesCallerPushed = 1405 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1406 FPDiff = NumBytesCallerPushed - NumBytes; 1407 1408 // Set the delta of movement of the returnaddr stackslot. 1409 // But only set if delta is greater than previous delta. 1410 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1411 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1412 } 1413 1414 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); 1415 1416 SDOperand RetAddrFrIdx, NewRetAddrFrIdx; 1417 if (IsTailCall) { 1418 // Adjust the Return address stack slot. 1419 if (FPDiff) { 1420 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32; 1421 RetAddrFrIdx = getReturnAddressFrameIndex(DAG); 1422 // Load the "old" Return address. 1423 RetAddrFrIdx = 1424 DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0); 1425 // Calculate the new stack slot for the return address. 1426 int SlotSize = Is64Bit ? 8 : 4; 1427 int NewReturnAddrFI = 1428 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1429 NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1430 Chain = SDOperand(RetAddrFrIdx.Val, 1); 1431 } 1432 } 1433 1434 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1435 SmallVector<SDOperand, 8> MemOpChains; 1436 1437 SDOperand StackPtr; 1438 1439 // Walk the register/memloc assignments, inserting copies/loads. For tail 1440 // calls, lower arguments which could otherwise be possibly overwritten to the 1441 // stack slot where they would go on normal function calls. 1442 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1443 CCValAssign &VA = ArgLocs[i]; 1444 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1445 1446 // Promote the value if needed. 1447 switch (VA.getLocInfo()) { 1448 default: assert(0 && "Unknown loc info!"); 1449 case CCValAssign::Full: break; 1450 case CCValAssign::SExt: 1451 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1452 break; 1453 case CCValAssign::ZExt: 1454 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1455 break; 1456 case CCValAssign::AExt: 1457 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1458 break; 1459 } 1460 1461 if (VA.isRegLoc()) { 1462 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1463 } else { 1464 if (!IsTailCall || IsPossiblyOverwrittenArgumentOfTailCall(Arg)) { 1465 assert(VA.isMemLoc()); 1466 if (StackPtr.Val == 0) 1467 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1468 1469 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1470 Arg)); 1471 } 1472 } 1473 } 1474 1475 if (!MemOpChains.empty()) 1476 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1477 &MemOpChains[0], MemOpChains.size()); 1478 1479 // Build a sequence of copy-to-reg nodes chained together with token chain 1480 // and flag operands which copy the outgoing args into registers. 
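  // Chaining the copies through the Flag value keeps them glued together,
  // so the scheduler cannot separate them from the call node they feed.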
1481 SDOperand InFlag; 1482 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1483 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1484 InFlag); 1485 InFlag = Chain.getValue(1); 1486 } 1487 1488 if (IsTailCall) 1489 InFlag = SDOperand(); // ??? Isn't this nuking the preceding loop's output? 1490 1491 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1492 // GOT pointer. 1493 // Does not work with tail call since ebx is not restored correctly by 1494 // tailcaller. TODO: at least for x86 - verify for x86-64 1495 if (!IsTailCall && !Is64Bit && 1496 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1497 Subtarget->isPICStyleGOT()) { 1498 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1499 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1500 InFlag); 1501 InFlag = Chain.getValue(1); 1502 } 1503 1504 if (Is64Bit && isVarArg) { 1505 // From AMD64 ABI document: 1506 // For calls that may call functions that use varargs or stdargs 1507 // (prototype-less calls or calls to functions containing ellipsis (...) in 1508 // the declaration) %al is used as hidden argument to specify the number 1509 // of SSE registers used. The contents of %al do not need to match exactly 1510 // the number of registers, but must be an ubound on the number of SSE 1511 // registers used and is in the range 0 - 8 inclusive. 1512 1513 // Count the number of XMM registers allocated. 1514 static const unsigned XMMArgRegs[] = { 1515 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1516 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1517 }; 1518 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1519 1520 Chain = DAG.getCopyToReg(Chain, X86::AL, 1521 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1522 InFlag = Chain.getValue(1); 1523 } 1524 1525 // For tail calls lower the arguments to the 'real' stack slot. 1526 if (IsTailCall) { 1527 SmallVector<SDOperand, 8> MemOpChains2; 1528 SDOperand FIN; 1529 int FI = 0; 1530 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1531 CCValAssign &VA = ArgLocs[i]; 1532 if (!VA.isRegLoc()) { 1533 assert(VA.isMemLoc()); 1534 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1535 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1536 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1537 // Create frame index. 1538 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1539 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1540 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1541 FIN = DAG.getFrameIndex(FI, MVT::i32); 1542 SDOperand Source = Arg; 1543 if (IsPossiblyOverwrittenArgumentOfTailCall(Arg)) { 1544 // Copy from stack slots to stack slot of a tail called function. This 1545 // needs to be done because if we would lower the arguments directly 1546 // to their real stack slot we might end up overwriting each other. 1547 // Get source stack slot. 1548 Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 1549 if (StackPtr.Val == 0) 1550 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1551 Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source); 1552 if ((Flags & ISD::ParamFlags::ByVal)==0) 1553 Source = DAG.getLoad(VA.getValVT(), Chain, Source, NULL, 0); 1554 } 1555 1556 if (Flags & ISD::ParamFlags::ByVal) { 1557 // Copy relative to framepointer. 1558 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain, 1559 Flags, DAG)); 1560 } else { 1561 // Store relative to framepointer. 
1562 MemOpChains2.push_back(DAG.getStore(Chain, Source, FIN, NULL, 0)); 1563 } 1564 } 1565 } 1566 1567 if (!MemOpChains2.empty()) 1568 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1569 &MemOpChains2[0], MemOpChains2.size()); 1570 1571 // Store the return address to the appropriate stack slot. 1572 if (FPDiff) 1573 Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0); 1574 } 1575 1576 // If the callee is a GlobalAddress node (quite common, every direct call is) 1577 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1578 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1579 // We should use extra load for direct calls to dllimported functions in 1580 // non-JIT mode. 1581 if ((IsTailCall || !Is64Bit || 1582 getTargetMachine().getCodeModel() != CodeModel::Large) 1583 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1584 getTargetMachine(), true)) 1585 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1586 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1587 if (IsTailCall || !Is64Bit || 1588 getTargetMachine().getCodeModel() != CodeModel::Large) 1589 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1590 } else if (IsTailCall) { 1591 assert(Callee.getOpcode() == ISD::LOAD && 1592 "Function destination must be loaded into virtual register"); 1593 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1594 1595 Chain = DAG.getCopyToReg(Chain, 1596 DAG.getRegister(Opc, getPointerTy()) , 1597 Callee,InFlag); 1598 Callee = DAG.getRegister(Opc, getPointerTy()); 1599 // Add register as live out. 1600 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1601 } 1602 1603 // Returns a chain & a flag for retval copy to use. 1604 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1605 SmallVector<SDOperand, 8> Ops; 1606 1607 if (IsTailCall) { 1608 Ops.push_back(Chain); 1609 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1610 Ops.push_back(DAG.getIntPtrConstant(0)); 1611 if (InFlag.Val) 1612 Ops.push_back(InFlag); 1613 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1614 InFlag = Chain.getValue(1); 1615 1616 // Returns a chain & a flag for retval copy to use. 1617 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1618 Ops.clear(); 1619 } 1620 1621 Ops.push_back(Chain); 1622 Ops.push_back(Callee); 1623 1624 if (IsTailCall) 1625 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1626 1627 // Add an implicit use GOT pointer in EBX. 1628 if (!IsTailCall && !Is64Bit && 1629 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1630 Subtarget->isPICStyleGOT()) 1631 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1632 1633 // Add argument registers to the end of the list so that they are known live 1634 // into the call. 1635 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1636 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1637 RegsToPass[i].second.getValueType())); 1638 1639 if (InFlag.Val) 1640 Ops.push_back(InFlag); 1641 1642 if (IsTailCall) { 1643 assert(InFlag.Val && 1644 "Flag must be set. Depend on flag being set in LowerRET"); 1645 Chain = DAG.getNode(X86ISD::TAILCALL, 1646 Op.Val->getVTList(), &Ops[0], Ops.size()); 1647 1648 return SDOperand(Chain.Val, Op.ResNo); 1649 } 1650 1651 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1652 InFlag = Chain.getValue(1); 1653 1654 // Create the CALLSEQ_END node. 
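  // CALLSEQ_END is given both the number of bytes we pushed and the number
  // the callee will pop, so the net stack adjustment after the call can be
  // computed.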
  unsigned NumBytesForCalleeToPush;
  if (IsCalleePop(Op))
    NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
  else if (!Is64Bit && IsStructRet)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(NumBytes),
                             DAG.getIntPtrConstant(NumBytesForCalleeToPush),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  switch (SRetMethod) {
  default:
    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
  case X86::InGPR64:
    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
                                                   X86::RAX, X86::RDX,
                                                   MVT::i64, DAG), Op.ResNo);
  case X86::InSSE:
    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
                                                   X86::XMM0, X86::XMM1,
                                                   MVT::f64, DAG), Op.ResNo);
  case X86::InX87:
    return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
                     Op.ResNo);
  }
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like stdcall, this is a callee-cleanup convention, except that ECX is
// reserved for storing the tail called function address. Only 2 registers are
// free for argument passing (inreg). Tail call optimization is performed
// provided:
//  * tailcallopt is enabled
//  * caller/callee are fastcc
//  * elf/pic is disabled OR
//  * elf/pic enabled + callee is in module + callee has
//    visibility protected or hidden
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
// for example)
// If a tail called function callee has more arguments than the caller, the
// caller needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after the
// original RETADDR, but before the saved framepointer or the spilled registers
// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
// stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Align the stack size so that, together with
/// the return address slot, it is a multiple of the stack alignment, e.g.
/// 16n + 12 for a 16-byte alignment requirement with a 4-byte slot. For
/// example, a StackSize of 20 becomes 28 (16 + 12), so that pushing the 4-byte
/// return address restores 16-byte alignment.
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // The remainder already fits below StackAlignment - SlotSize (e.g. 12),
      // so just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the lower bits, then add the stack alignment once plus
      // StackAlignment - SlotSize (e.g. 16 + 12).
      Offset = ((~AlignMask) & Offset) + StackAlignment +
        (StackAlignment-SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A function is eligible if
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On elf/pic %ebx needs to be livein.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT())
        return true;

      // Can only do local tail calls with PIC.
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        return G->getGlobal()->hasHiddenVisibility()
            || G->getGlobal()->hasProtectedVisibility();
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//


SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


/// translateX86CC - do a one-to-one translation of an ISD::CondCode to the
/// X86 specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
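/// For example, SETOLT(LHS, RHS) is handled by swapping the operands and
/// using COND_A ('above'): after the swap the test is RHS > LHS, and COND_A
/// (CF == 0 and ZF == 0 in the table below) is false for unordered operands,
/// which gives the required ordered less-than semantics.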
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the specified range
/// [Low, Hi).
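/// e.g. isUndefOrInRange(Op, 0, 4) accepts an undef or any constant 0..3.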
1911static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { 1912 if (Op.getOpcode() == ISD::UNDEF) 1913 return true; 1914 1915 unsigned Val = cast<ConstantSDNode>(Op)->getValue(); 1916 return (Val >= Low && Val < Hi); 1917} 1918 1919/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return 1920/// true if Op is undef or if its value equal to the specified value. 1921static bool isUndefOrEqual(SDOperand Op, unsigned Val) { 1922 if (Op.getOpcode() == ISD::UNDEF) 1923 return true; 1924 return cast<ConstantSDNode>(Op)->getValue() == Val; 1925} 1926 1927/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 1928/// specifies a shuffle of elements that is suitable for input to PSHUFD. 1929bool X86::isPSHUFDMask(SDNode *N) { 1930 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1931 1932 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 1933 return false; 1934 1935 // Check if the value doesn't reference the second vector. 1936 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1937 SDOperand Arg = N->getOperand(i); 1938 if (Arg.getOpcode() == ISD::UNDEF) continue; 1939 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1940 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 1941 return false; 1942 } 1943 1944 return true; 1945} 1946 1947/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1948/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1949bool X86::isPSHUFHWMask(SDNode *N) { 1950 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1951 1952 if (N->getNumOperands() != 8) 1953 return false; 1954 1955 // Lower quadword copied in order. 1956 for (unsigned i = 0; i != 4; ++i) { 1957 SDOperand Arg = N->getOperand(i); 1958 if (Arg.getOpcode() == ISD::UNDEF) continue; 1959 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1960 if (cast<ConstantSDNode>(Arg)->getValue() != i) 1961 return false; 1962 } 1963 1964 // Upper quadword shuffled. 1965 for (unsigned i = 4; i != 8; ++i) { 1966 SDOperand Arg = N->getOperand(i); 1967 if (Arg.getOpcode() == ISD::UNDEF) continue; 1968 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1969 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1970 if (Val < 4 || Val > 7) 1971 return false; 1972 } 1973 1974 return true; 1975} 1976 1977/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 1978/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 1979bool X86::isPSHUFLWMask(SDNode *N) { 1980 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1981 1982 if (N->getNumOperands() != 8) 1983 return false; 1984 1985 // Upper quadword copied in order. 1986 for (unsigned i = 4; i != 8; ++i) 1987 if (!isUndefOrEqual(N->getOperand(i), i)) 1988 return false; 1989 1990 // Lower quadword shuffled. 1991 for (unsigned i = 0; i != 4; ++i) 1992 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 1993 return false; 1994 1995 return true; 1996} 1997 1998/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 1999/// specifies a shuffle of elements that is suitable for input to SHUFP*. 
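/// e.g. with 4 elements, <1, 0, 5, 4> qualifies: the low half selects from V1
/// (indices 0-3) and the high half from V2 (indices 4-7).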
2000static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2001 if (NumElems != 2 && NumElems != 4) return false; 2002 2003 unsigned Half = NumElems / 2; 2004 for (unsigned i = 0; i < Half; ++i) 2005 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2006 return false; 2007 for (unsigned i = Half; i < NumElems; ++i) 2008 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2009 return false; 2010 2011 return true; 2012} 2013 2014bool X86::isSHUFPMask(SDNode *N) { 2015 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2016 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2017} 2018 2019/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2020/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2021/// half elements to come from vector 1 (which would equal the dest.) and 2022/// the upper half to come from vector 2. 2023static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2024 if (NumOps != 2 && NumOps != 4) return false; 2025 2026 unsigned Half = NumOps / 2; 2027 for (unsigned i = 0; i < Half; ++i) 2028 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2029 return false; 2030 for (unsigned i = Half; i < NumOps; ++i) 2031 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2032 return false; 2033 return true; 2034} 2035 2036static bool isCommutedSHUFP(SDNode *N) { 2037 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2038 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2039} 2040 2041/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2042/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2043bool X86::isMOVHLPSMask(SDNode *N) { 2044 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2045 2046 if (N->getNumOperands() != 4) 2047 return false; 2048 2049 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2050 return isUndefOrEqual(N->getOperand(0), 6) && 2051 isUndefOrEqual(N->getOperand(1), 7) && 2052 isUndefOrEqual(N->getOperand(2), 2) && 2053 isUndefOrEqual(N->getOperand(3), 3); 2054} 2055 2056/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2057/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2058/// <2, 3, 2, 3> 2059bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2060 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2061 2062 if (N->getNumOperands() != 4) 2063 return false; 2064 2065 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2066 return isUndefOrEqual(N->getOperand(0), 2) && 2067 isUndefOrEqual(N->getOperand(1), 3) && 2068 isUndefOrEqual(N->getOperand(2), 2) && 2069 isUndefOrEqual(N->getOperand(3), 3); 2070} 2071 2072/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2073/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2074bool X86::isMOVLPMask(SDNode *N) { 2075 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2076 2077 unsigned NumElems = N->getNumOperands(); 2078 if (NumElems != 2 && NumElems != 4) 2079 return false; 2080 2081 for (unsigned i = 0; i < NumElems/2; ++i) 2082 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2083 return false; 2084 2085 for (unsigned i = NumElems/2; i < NumElems; ++i) 2086 if (!isUndefOrEqual(N->getOperand(i), i)) 2087 return false; 2088 2089 return true; 2090} 2091 2092/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2093/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2094/// and MOVLHPS. 
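/// e.g. with 4 elements the canonical MOVLHPS mask is <0, 1, 4, 5>.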
2095bool X86::isMOVHPMask(SDNode *N) { 2096 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2097 2098 unsigned NumElems = N->getNumOperands(); 2099 if (NumElems != 2 && NumElems != 4) 2100 return false; 2101 2102 for (unsigned i = 0; i < NumElems/2; ++i) 2103 if (!isUndefOrEqual(N->getOperand(i), i)) 2104 return false; 2105 2106 for (unsigned i = 0; i < NumElems/2; ++i) { 2107 SDOperand Arg = N->getOperand(i + NumElems/2); 2108 if (!isUndefOrEqual(Arg, i + NumElems)) 2109 return false; 2110 } 2111 2112 return true; 2113} 2114 2115/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2116/// specifies a shuffle of elements that is suitable for input to UNPCKL. 2117bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2118 bool V2IsSplat = false) { 2119 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2120 return false; 2121 2122 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2123 SDOperand BitI = Elts[i]; 2124 SDOperand BitI1 = Elts[i+1]; 2125 if (!isUndefOrEqual(BitI, j)) 2126 return false; 2127 if (V2IsSplat) { 2128 if (isUndefOrEqual(BitI1, NumElts)) 2129 return false; 2130 } else { 2131 if (!isUndefOrEqual(BitI1, j + NumElts)) 2132 return false; 2133 } 2134 } 2135 2136 return true; 2137} 2138 2139bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2140 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2141 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2142} 2143 2144/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2145/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2146bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2147 bool V2IsSplat = false) { 2148 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2149 return false; 2150 2151 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2152 SDOperand BitI = Elts[i]; 2153 SDOperand BitI1 = Elts[i+1]; 2154 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2155 return false; 2156 if (V2IsSplat) { 2157 if (isUndefOrEqual(BitI1, NumElts)) 2158 return false; 2159 } else { 2160 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2161 return false; 2162 } 2163 } 2164 2165 return true; 2166} 2167 2168bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2169 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2170 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2171} 2172 2173/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2174/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2175/// <0, 0, 1, 1> 2176bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2177 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2178 2179 unsigned NumElems = N->getNumOperands(); 2180 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2181 return false; 2182 2183 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2184 SDOperand BitI = N->getOperand(i); 2185 SDOperand BitI1 = N->getOperand(i+1); 2186 2187 if (!isUndefOrEqual(BitI, j)) 2188 return false; 2189 if (!isUndefOrEqual(BitI1, j)) 2190 return false; 2191 } 2192 2193 return true; 2194} 2195 2196/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2197/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef, 2198/// <2, 2, 3, 3> 2199bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2200 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2201 2202 unsigned NumElems = N->getNumOperands(); 2203 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2204 return false; 2205 2206 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2207 SDOperand BitI = N->getOperand(i); 2208 SDOperand BitI1 = N->getOperand(i + 1); 2209 2210 if (!isUndefOrEqual(BitI, j)) 2211 return false; 2212 if (!isUndefOrEqual(BitI1, j)) 2213 return false; 2214 } 2215 2216 return true; 2217} 2218 2219/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2220/// specifies a shuffle of elements that is suitable for input to MOVSS, 2221/// MOVSD, and MOVD, i.e. setting the lowest element. 2222static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) { 2223 if (NumElts != 2 && NumElts != 4) 2224 return false; 2225 2226 if (!isUndefOrEqual(Elts[0], NumElts)) 2227 return false; 2228 2229 for (unsigned i = 1; i < NumElts; ++i) { 2230 if (!isUndefOrEqual(Elts[i], i)) 2231 return false; 2232 } 2233 2234 return true; 2235} 2236 2237bool X86::isMOVLMask(SDNode *N) { 2238 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2239 return ::isMOVLMask(N->op_begin(), N->getNumOperands()); 2240} 2241 2242/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 2243/// of what x86 movss want. X86 movs requires the lowest element to be lowest 2244/// element of vector 2 and the other elements to come from vector 1 in order. 2245static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps, 2246 bool V2IsSplat = false, 2247 bool V2IsUndef = false) { 2248 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 2249 return false; 2250 2251 if (!isUndefOrEqual(Ops[0], 0)) 2252 return false; 2253 2254 for (unsigned i = 1; i < NumOps; ++i) { 2255 SDOperand Arg = Ops[i]; 2256 if (!(isUndefOrEqual(Arg, i+NumOps) || 2257 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || 2258 (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) 2259 return false; 2260 } 2261 2262 return true; 2263} 2264 2265static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 2266 bool V2IsUndef = false) { 2267 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2268 return isCommutedMOVL(N->op_begin(), N->getNumOperands(), 2269 V2IsSplat, V2IsUndef); 2270} 2271 2272/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2273/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 2274bool X86::isMOVSHDUPMask(SDNode *N) { 2275 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2276 2277 if (N->getNumOperands() != 4) 2278 return false; 2279 2280 // Expect 1, 1, 3, 3 2281 for (unsigned i = 0; i < 2; ++i) { 2282 SDOperand Arg = N->getOperand(i); 2283 if (Arg.getOpcode() == ISD::UNDEF) continue; 2284 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2285 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2286 if (Val != 1) return false; 2287 } 2288 2289 bool HasHi = false; 2290 for (unsigned i = 2; i < 4; ++i) { 2291 SDOperand Arg = N->getOperand(i); 2292 if (Arg.getOpcode() == ISD::UNDEF) continue; 2293 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2294 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2295 if (Val != 3) return false; 2296 HasHi = true; 2297 } 2298 2299 // Don't use movshdup if it can be done with a shufps. 
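  // (HasHi is false only when elements 2 and 3 are both undef; a
  // <1, 1, u, u> pattern like that is expressible as a single shufps.)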
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit, and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
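/// e.g. <0, 0, 0, 0> qualifies, with undefs allowed in any position.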
2389bool X86::isSplatLoMask(SDNode *N) { 2390 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2391 2392 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2393 if (!isUndefOrEqual(N->getOperand(i), 0)) 2394 return false; 2395 return true; 2396} 2397 2398/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2399/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2400/// instructions. 2401unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2402 unsigned NumOperands = N->getNumOperands(); 2403 unsigned Shift = (NumOperands == 4) ? 2 : 1; 2404 unsigned Mask = 0; 2405 for (unsigned i = 0; i < NumOperands; ++i) { 2406 unsigned Val = 0; 2407 SDOperand Arg = N->getOperand(NumOperands-i-1); 2408 if (Arg.getOpcode() != ISD::UNDEF) 2409 Val = cast<ConstantSDNode>(Arg)->getValue(); 2410 if (Val >= NumOperands) Val -= NumOperands; 2411 Mask |= Val; 2412 if (i != NumOperands - 1) 2413 Mask <<= Shift; 2414 } 2415 2416 return Mask; 2417} 2418 2419/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2420/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2421/// instructions. 2422unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2423 unsigned Mask = 0; 2424 // 8 nodes, but we only care about the last 4. 2425 for (unsigned i = 7; i >= 4; --i) { 2426 unsigned Val = 0; 2427 SDOperand Arg = N->getOperand(i); 2428 if (Arg.getOpcode() != ISD::UNDEF) 2429 Val = cast<ConstantSDNode>(Arg)->getValue(); 2430 Mask |= (Val - 4); 2431 if (i != 4) 2432 Mask <<= 2; 2433 } 2434 2435 return Mask; 2436} 2437 2438/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2439/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2440/// instructions. 2441unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2442 unsigned Mask = 0; 2443 // 8 nodes, but we only care about the first 4. 2444 for (int i = 3; i >= 0; --i) { 2445 unsigned Val = 0; 2446 SDOperand Arg = N->getOperand(i); 2447 if (Arg.getOpcode() != ISD::UNDEF) 2448 Val = cast<ConstantSDNode>(Arg)->getValue(); 2449 Mask |= Val; 2450 if (i != 0) 2451 Mask <<= 2; 2452 } 2453 2454 return Mask; 2455} 2456 2457/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2458/// specifies a 8 element shuffle that can be broken into a pair of 2459/// PSHUFHW and PSHUFLW. 2460static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2461 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2462 2463 if (N->getNumOperands() != 8) 2464 return false; 2465 2466 // Lower quadword shuffled. 2467 for (unsigned i = 0; i != 4; ++i) { 2468 SDOperand Arg = N->getOperand(i); 2469 if (Arg.getOpcode() == ISD::UNDEF) continue; 2470 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2471 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2472 if (Val >= 4) 2473 return false; 2474 } 2475 2476 // Upper quadword shuffled. 2477 for (unsigned i = 4; i != 8; ++i) { 2478 SDOperand Arg = N->getOperand(i); 2479 if (Arg.getOpcode() == ISD::UNDEF) continue; 2480 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2481 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2482 if (Val < 4 || Val > 7) 2483 return false; 2484 } 2485 2486 return true; 2487} 2488 2489/// CommuteVectorShuffle - Swap vector_shuffle operands as well as 2490/// values in ther permute mask. 
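/// e.g. a 4-element mask <0, 1, 4, 5> becomes <4, 5, 0, 1> once V1 and V2
/// are swapped.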
2491static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2492 SDOperand &V2, SDOperand &Mask, 2493 SelectionDAG &DAG) { 2494 MVT::ValueType VT = Op.getValueType(); 2495 MVT::ValueType MaskVT = Mask.getValueType(); 2496 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2497 unsigned NumElems = Mask.getNumOperands(); 2498 SmallVector<SDOperand, 8> MaskVec; 2499 2500 for (unsigned i = 0; i != NumElems; ++i) { 2501 SDOperand Arg = Mask.getOperand(i); 2502 if (Arg.getOpcode() == ISD::UNDEF) { 2503 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2504 continue; 2505 } 2506 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2507 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2508 if (Val < NumElems) 2509 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2510 else 2511 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2512 } 2513 2514 std::swap(V1, V2); 2515 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2516 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2517} 2518 2519/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2520/// the two vector operands have swapped position. 2521static 2522SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2523 MVT::ValueType MaskVT = Mask.getValueType(); 2524 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2525 unsigned NumElems = Mask.getNumOperands(); 2526 SmallVector<SDOperand, 8> MaskVec; 2527 for (unsigned i = 0; i != NumElems; ++i) { 2528 SDOperand Arg = Mask.getOperand(i); 2529 if (Arg.getOpcode() == ISD::UNDEF) { 2530 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2531 continue; 2532 } 2533 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2534 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2535 if (Val < NumElems) 2536 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2537 else 2538 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2539 } 2540 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2541} 2542 2543 2544/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2545/// match movhlps. The lower half elements should come from upper half of 2546/// V1 (and in order), and the upper half elements should come from the upper 2547/// half of V2 (and in order). 2548static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2549 unsigned NumElems = Mask->getNumOperands(); 2550 if (NumElems != 4) 2551 return false; 2552 for (unsigned i = 0, e = 2; i != e; ++i) 2553 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2554 return false; 2555 for (unsigned i = 2; i != 4; ++i) 2556 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2557 return false; 2558 return true; 2559} 2560 2561/// isScalarLoadToVector - Returns true if the node is a scalar load that 2562/// is promoted to a vector. 2563static inline bool isScalarLoadToVector(SDNode *N) { 2564 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2565 N = N->getOperand(0).Val; 2566 return ISD::isNON_EXTLoad(N); 2567 } 2568 return false; 2569} 2570 2571/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2572/// match movlp{s|d}. The lower half elements should come from lower half of 2573/// V1 (and in order), and the upper half elements should come from the upper 2574/// half of V2 (and in order). And since V1 will become the source of the 2575/// MOVLP, it must be either a vector load or a scalar load to vector. 
2576static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 2577 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 2578 return false; 2579 // Is V2 is a vector load, don't do this transformation. We will try to use 2580 // load folding shufps op. 2581 if (ISD::isNON_EXTLoad(V2)) 2582 return false; 2583 2584 unsigned NumElems = Mask->getNumOperands(); 2585 if (NumElems != 2 && NumElems != 4) 2586 return false; 2587 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2588 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2589 return false; 2590 for (unsigned i = NumElems/2; i != NumElems; ++i) 2591 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2592 return false; 2593 return true; 2594} 2595 2596/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2597/// all the same. 2598static bool isSplatVector(SDNode *N) { 2599 if (N->getOpcode() != ISD::BUILD_VECTOR) 2600 return false; 2601 2602 SDOperand SplatValue = N->getOperand(0); 2603 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2604 if (N->getOperand(i) != SplatValue) 2605 return false; 2606 return true; 2607} 2608 2609/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2610/// to an undef. 2611static bool isUndefShuffle(SDNode *N) { 2612 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2613 return false; 2614 2615 SDOperand V1 = N->getOperand(0); 2616 SDOperand V2 = N->getOperand(1); 2617 SDOperand Mask = N->getOperand(2); 2618 unsigned NumElems = Mask.getNumOperands(); 2619 for (unsigned i = 0; i != NumElems; ++i) { 2620 SDOperand Arg = Mask.getOperand(i); 2621 if (Arg.getOpcode() != ISD::UNDEF) { 2622 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2623 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2624 return false; 2625 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2626 return false; 2627 } 2628 } 2629 return true; 2630} 2631 2632/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2633/// constant +0.0. 2634static inline bool isZeroNode(SDOperand Elt) { 2635 return ((isa<ConstantSDNode>(Elt) && 2636 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2637 (isa<ConstantFPSDNode>(Elt) && 2638 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 2639} 2640 2641/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2642/// to an zero vector. 2643static bool isZeroShuffle(SDNode *N) { 2644 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2645 return false; 2646 2647 SDOperand V1 = N->getOperand(0); 2648 SDOperand V2 = N->getOperand(1); 2649 SDOperand Mask = N->getOperand(2); 2650 unsigned NumElems = Mask.getNumOperands(); 2651 for (unsigned i = 0; i != NumElems; ++i) { 2652 SDOperand Arg = Mask.getOperand(i); 2653 if (Arg.getOpcode() == ISD::UNDEF) 2654 continue; 2655 2656 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2657 if (Idx < NumElems) { 2658 unsigned Opc = V1.Val->getOpcode(); 2659 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val)) 2660 continue; 2661 if (Opc != ISD::BUILD_VECTOR || 2662 !isZeroNode(V1.Val->getOperand(Idx))) 2663 return false; 2664 } else if (Idx >= NumElems) { 2665 unsigned Opc = V2.Val->getOpcode(); 2666 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val)) 2667 continue; 2668 if (Opc != ISD::BUILD_VECTOR || 2669 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2670 return false; 2671 } 2672 } 2673 return true; 2674} 2675 2676/// getZeroVector - Returns a vector of specified type with all zero elements. 
2677/// 2678static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2679 assert(MVT::isVector(VT) && "Expected a vector type"); 2680 2681 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2682 // type. This ensures they get CSE'd. 2683 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2684 SDOperand Vec; 2685 if (MVT::getSizeInBits(VT) == 64) // MMX 2686 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2687 else // SSE 2688 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2689 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2690} 2691 2692/// getOnesVector - Returns a vector of specified type with all bits set. 2693/// 2694static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2695 assert(MVT::isVector(VT) && "Expected a vector type"); 2696 2697 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2698 // type. This ensures they get CSE'd. 2699 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2700 SDOperand Vec; 2701 if (MVT::getSizeInBits(VT) == 64) // MMX 2702 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2703 else // SSE 2704 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2705 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2706} 2707 2708 2709/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2710/// that point to V2 points to its first element. 2711static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2712 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2713 2714 bool Changed = false; 2715 SmallVector<SDOperand, 8> MaskVec; 2716 unsigned NumElems = Mask.getNumOperands(); 2717 for (unsigned i = 0; i != NumElems; ++i) { 2718 SDOperand Arg = Mask.getOperand(i); 2719 if (Arg.getOpcode() != ISD::UNDEF) { 2720 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2721 if (Val > NumElems) { 2722 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2723 Changed = true; 2724 } 2725 } 2726 MaskVec.push_back(Arg); 2727 } 2728 2729 if (Changed) 2730 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2731 &MaskVec[0], MaskVec.size()); 2732 return Mask; 2733} 2734 2735/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2736/// operation of specified width. 2737static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2738 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2739 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2740 2741 SmallVector<SDOperand, 8> MaskVec; 2742 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2743 for (unsigned i = 1; i != NumElems; ++i) 2744 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2745 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2746} 2747 2748/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2749/// of specified width. 
2750static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2751 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2752 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2753 SmallVector<SDOperand, 8> MaskVec; 2754 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2755 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2756 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2757 } 2758 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2759} 2760 2761/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2762/// of specified width. 2763static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2764 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2765 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2766 unsigned Half = NumElems/2; 2767 SmallVector<SDOperand, 8> MaskVec; 2768 for (unsigned i = 0; i != Half; ++i) { 2769 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2770 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2771 } 2772 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2773} 2774 2775/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 2776/// 2777static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2778 SDOperand V1 = Op.getOperand(0); 2779 SDOperand Mask = Op.getOperand(2); 2780 MVT::ValueType VT = Op.getValueType(); 2781 unsigned NumElems = Mask.getNumOperands(); 2782 Mask = getUnpacklMask(NumElems, DAG); 2783 while (NumElems != 4) { 2784 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2785 NumElems >>= 1; 2786 } 2787 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2788 2789 Mask = getZeroVector(MVT::v4i32, DAG); 2790 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2791 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2792 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2793} 2794 2795/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2796/// vector of zero or undef vector. This produces a shuffle where the low 2797/// element of V2 is swizzled into the zero/undef vector, landing at element 2798/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 2799static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT, 2800 unsigned NumElems, unsigned Idx, 2801 bool isZero, SelectionDAG &DAG) { 2802 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2803 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2804 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2805 SmallVector<SDOperand, 16> MaskVec; 2806 for (unsigned i = 0; i != NumElems; ++i) 2807 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 2808 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 2809 else 2810 MaskVec.push_back(DAG.getConstant(i, EVT)); 2811 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2812 &MaskVec[0], MaskVec.size()); 2813 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2814} 2815 2816/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
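/// Adjacent pairs of bytes are zero-extended to i16 and merged (the even
/// byte in the low half, the odd byte shifted into the high half), inserted
/// into a v8i16, and the result is bitcast back to v16i8 at the end.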
2817/// 2818static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2819 unsigned NumNonZero, unsigned NumZero, 2820 SelectionDAG &DAG, TargetLowering &TLI) { 2821 if (NumNonZero > 8) 2822 return SDOperand(); 2823 2824 SDOperand V(0, 0); 2825 bool First = true; 2826 for (unsigned i = 0; i < 16; ++i) { 2827 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2828 if (ThisIsNonZero && First) { 2829 if (NumZero) 2830 V = getZeroVector(MVT::v8i16, DAG); 2831 else 2832 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2833 First = false; 2834 } 2835 2836 if ((i & 1) != 0) { 2837 SDOperand ThisElt(0, 0), LastElt(0, 0); 2838 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2839 if (LastIsNonZero) { 2840 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2841 } 2842 if (ThisIsNonZero) { 2843 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2844 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2845 ThisElt, DAG.getConstant(8, MVT::i8)); 2846 if (LastIsNonZero) 2847 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2848 } else 2849 ThisElt = LastElt; 2850 2851 if (ThisElt.Val) 2852 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2853 DAG.getIntPtrConstant(i/2)); 2854 } 2855 } 2856 2857 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2858} 2859 2860/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2861/// 2862static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2863 unsigned NumNonZero, unsigned NumZero, 2864 SelectionDAG &DAG, TargetLowering &TLI) { 2865 if (NumNonZero > 4) 2866 return SDOperand(); 2867 2868 SDOperand V(0, 0); 2869 bool First = true; 2870 for (unsigned i = 0; i < 8; ++i) { 2871 bool isNonZero = (NonZeros & (1 << i)) != 0; 2872 if (isNonZero) { 2873 if (First) { 2874 if (NumZero) 2875 V = getZeroVector(MVT::v8i16, DAG); 2876 else 2877 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2878 First = false; 2879 } 2880 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2881 DAG.getIntPtrConstant(i)); 2882 } 2883 } 2884 2885 return V; 2886} 2887 2888SDOperand 2889X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2890 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 2891 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 2892 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 2893 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 2894 // eliminated on x86-32 hosts. 
2895 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 2896 return Op; 2897 2898 if (ISD::isBuildVectorAllOnes(Op.Val)) 2899 return getOnesVector(Op.getValueType(), DAG); 2900 return getZeroVector(Op.getValueType(), DAG); 2901 } 2902 2903 MVT::ValueType VT = Op.getValueType(); 2904 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2905 unsigned EVTBits = MVT::getSizeInBits(EVT); 2906 2907 unsigned NumElems = Op.getNumOperands(); 2908 unsigned NumZero = 0; 2909 unsigned NumNonZero = 0; 2910 unsigned NonZeros = 0; 2911 bool HasNonImms = false; 2912 SmallSet<SDOperand, 8> Values; 2913 for (unsigned i = 0; i < NumElems; ++i) { 2914 SDOperand Elt = Op.getOperand(i); 2915 if (Elt.getOpcode() == ISD::UNDEF) 2916 continue; 2917 Values.insert(Elt); 2918 if (Elt.getOpcode() != ISD::Constant && 2919 Elt.getOpcode() != ISD::ConstantFP) 2920 HasNonImms = true; 2921 if (isZeroNode(Elt)) 2922 NumZero++; 2923 else { 2924 NonZeros |= (1 << i); 2925 NumNonZero++; 2926 } 2927 } 2928 2929 if (NumNonZero == 0) { 2930 // All undef vector. Return an UNDEF. All zero vectors were handled above. 2931 return DAG.getNode(ISD::UNDEF, VT); 2932 } 2933 2934 // Splat is obviously ok. Let legalizer expand it to a shuffle. 2935 if (Values.size() == 1) 2936 return SDOperand(); 2937 2938 // Special case for single non-zero element. 2939 if (NumNonZero == 1 && NumElems <= 4) { 2940 unsigned Idx = CountTrailingZeros_32(NonZeros); 2941 SDOperand Item = Op.getOperand(Idx); 2942 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2943 if (Idx == 0) 2944 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 2945 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 2946 NumZero > 0, DAG); 2947 else if (!HasNonImms) // Otherwise, it's better to do a constpool load. 2948 return SDOperand(); 2949 2950 if (EVTBits == 32) { 2951 // Turn it into a shuffle of zero and zero-extended scalar to vector. 2952 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 2953 DAG); 2954 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2955 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 2956 SmallVector<SDOperand, 8> MaskVec; 2957 for (unsigned i = 0; i < NumElems; i++) 2958 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 2959 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2960 &MaskVec[0], MaskVec.size()); 2961 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 2962 DAG.getNode(ISD::UNDEF, VT), Mask); 2963 } 2964 } 2965 2966 // A vector full of immediates; various special cases are already 2967 // handled, so this is best done with a single constant-pool load. 2968 if (!HasNonImms) 2969 return SDOperand(); 2970 2971 // Let legalizer expand 2-wide build_vectors. 2972 if (EVTBits == 64) 2973 return SDOperand(); 2974 2975 // If element VT is < 32 bits, convert it to inserts into a zero vector. 2976 if (EVTBits == 8 && NumElems == 16) { 2977 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 2978 *this); 2979 if (V.Val) return V; 2980 } 2981 2982 if (EVTBits == 16 && NumElems == 8) { 2983 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 2984 *this); 2985 if (V.Val) return V; 2986 } 2987 2988 // If element VT is == 32 bits, turn it into a number of shuffles. 
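// Sketch of the 4-wide path below when some elements are zero: each nonzero element becomes a scalar_to_vector, adjacent pairs are merged with MOVL or unpckl masks, and one final shuffle interleaves the two intermediate vectors.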
2989 SmallVector<SDOperand, 8> V; 2990 V.resize(NumElems); 2991 if (NumElems == 4 && NumZero > 0) { 2992 for (unsigned i = 0; i < 4; ++i) { 2993 bool isZero = !(NonZeros & (1 << i)); 2994 if (isZero) 2995 V[i] = getZeroVector(VT, DAG); 2996 else 2997 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 2998 } 2999 3000 for (unsigned i = 0; i < 2; ++i) { 3001 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3002 default: break; 3003 case 0: 3004 V[i] = V[i*2]; // Must be a zero vector. 3005 break; 3006 case 1: 3007 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3008 getMOVLMask(NumElems, DAG)); 3009 break; 3010 case 2: 3011 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3012 getMOVLMask(NumElems, DAG)); 3013 break; 3014 case 3: 3015 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3016 getUnpacklMask(NumElems, DAG)); 3017 break; 3018 } 3019 } 3020 3021 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3022 // clears the upper bits. 3023 // FIXME: we can do the same for v4f32 case when we know both parts of 3024 // the lower half come from scalar_to_vector (loadf32). We should do 3025 // that in post legalizer dag combiner with target specific hooks. 3026 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3027 return V[0]; 3028 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3029 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3030 SmallVector<SDOperand, 8> MaskVec; 3031 bool Reverse = (NonZeros & 0x3) == 2; 3032 for (unsigned i = 0; i < 2; ++i) 3033 if (Reverse) 3034 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3035 else 3036 MaskVec.push_back(DAG.getConstant(i, EVT)); 3037 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3038 for (unsigned i = 0; i < 2; ++i) 3039 if (Reverse) 3040 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3041 else 3042 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3043 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3044 &MaskVec[0], MaskVec.size()); 3045 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3046 } 3047 3048 if (Values.size() > 2) { 3049 // Expand into a number of unpckl*. 3050 // e.g. for v4f32 3051 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3052 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3053 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3054 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3055 for (unsigned i = 0; i < NumElems; ++i) 3056 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3057 NumElems >>= 1; 3058 while (NumElems != 0) { 3059 for (unsigned i = 0; i < NumElems; ++i) 3060 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3061 UnpckMask); 3062 NumElems >>= 1; 3063 } 3064 return V[0]; 3065 } 3066 3067 return SDOperand(); 3068} 3069 3070static 3071SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3072 SDOperand PermMask, SelectionDAG &DAG, 3073 TargetLowering &TLI) { 3074 SDOperand NewV; 3075 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3076 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3077 MVT::ValueType PtrVT = TLI.getPointerTy(); 3078 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3079 PermMask.Val->op_end()); 3080 3081 // First record which half of which vector the low elements come from. 
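// e.g. for the mask <2, 0, 3, 1, 12, 15, 13, 14>, all four low elements come from quad 0 (the low half of V1) and all four high elements from quad 3 (the high half of V2), so the scans below find BestLowQuad == 0 and BestHighQuad == 3.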
3082 SmallVector<unsigned, 4> LowQuad(4); 3083 for (unsigned i = 0; i < 4; ++i) { 3084 SDOperand Elt = MaskElts[i]; 3085 if (Elt.getOpcode() == ISD::UNDEF) 3086 continue; 3087 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3088 int QuadIdx = EltIdx / 4; 3089 ++LowQuad[QuadIdx]; 3090 } 3091 int BestLowQuad = -1; 3092 unsigned MaxQuad = 1; 3093 for (unsigned i = 0; i < 4; ++i) { 3094 if (LowQuad[i] > MaxQuad) { 3095 BestLowQuad = i; 3096 MaxQuad = LowQuad[i]; 3097 } 3098 } 3099 3100 // Record which half of which vector the high elements come from. 3101 SmallVector<unsigned, 4> HighQuad(4); 3102 for (unsigned i = 4; i < 8; ++i) { 3103 SDOperand Elt = MaskElts[i]; 3104 if (Elt.getOpcode() == ISD::UNDEF) 3105 continue; 3106 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3107 int QuadIdx = EltIdx / 4; 3108 ++HighQuad[QuadIdx]; 3109 } 3110 int BestHighQuad = -1; 3111 MaxQuad = 1; 3112 for (unsigned i = 0; i < 4; ++i) { 3113 if (HighQuad[i] > MaxQuad) { 3114 BestHighQuad = i; 3115 MaxQuad = HighQuad[i]; 3116 } 3117 } 3118 3119 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3120 if (BestLowQuad != -1 || BestHighQuad != -1) { 3121 // First sort the 4 chunks in order using shufpd. 3122 SmallVector<SDOperand, 8> MaskVec; 3123 if (BestLowQuad != -1) 3124 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3125 else 3126 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3127 if (BestHighQuad != -1) 3128 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3129 else 3130 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3131 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3132 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3133 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3134 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3135 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3136 3137 // Now sort high and low parts separately. 3138 BitVector InOrder(8); 3139 if (BestLowQuad != -1) { 3140 // Sort lower half in order using PSHUFLW. 3141 MaskVec.clear(); 3142 bool AnyOutOrder = false; 3143 for (unsigned i = 0; i != 4; ++i) { 3144 SDOperand Elt = MaskElts[i]; 3145 if (Elt.getOpcode() == ISD::UNDEF) { 3146 MaskVec.push_back(Elt); 3147 InOrder.set(i); 3148 } else { 3149 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3150 if (EltIdx != i) 3151 AnyOutOrder = true; 3152 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3153 // If this element is in the right place after this shuffle, then 3154 // remember it. 3155 if ((int)(EltIdx / 4) == BestLowQuad) 3156 InOrder.set(i); 3157 } 3158 } 3159 if (AnyOutOrder) { 3160 for (unsigned i = 4; i != 8; ++i) 3161 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3162 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3163 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3164 } 3165 } 3166 3167 if (BestHighQuad != -1) { 3168 // Sort high half in order using PSHUFHW if possible. 
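// e.g. if the desired high elements are <6, 4, 7, 5> (all from quad 1), the PSHUFHW mask built below is <0, 1, 2, 3, 6, 4, 7, 5>.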
3169 MaskVec.clear(); 3170 for (unsigned i = 0; i != 4; ++i) 3171 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3172 bool AnyOutOrder = false; 3173 for (unsigned i = 4; i != 8; ++i) { 3174 SDOperand Elt = MaskElts[i]; 3175 if (Elt.getOpcode() == ISD::UNDEF) { 3176 MaskVec.push_back(Elt); 3177 InOrder.set(i); 3178 } else { 3179 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3180 if (EltIdx != i) 3181 AnyOutOrder = true; 3182 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3183 // If this element is in the right place after this shuffle, then 3184 // remember it. 3185 if ((int)(EltIdx / 4) == BestHighQuad) 3186 InOrder.set(i); 3187 } 3188 } 3189 if (AnyOutOrder) { 3190 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3191 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3192 } 3193 } 3194 3195 // The other elements are put in the right place using pextrw and pinsrw. 3196 for (unsigned i = 0; i != 8; ++i) { 3197 if (InOrder[i]) 3198 continue; 3199 SDOperand Elt = MaskElts[i]; 3200 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3201 if (EltIdx == i) 3202 continue; 3203 SDOperand ExtOp = (EltIdx < 8) 3204 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3205 DAG.getConstant(EltIdx, PtrVT)) 3206 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3207 DAG.getConstant(EltIdx - 8, PtrVT)); 3208 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3209 DAG.getConstant(i, PtrVT)); 3210 } 3211 return NewV; 3212 } 3213 3214 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3215 // as few as possible. 3216 // First, let's find out how many elements are already in the right order. 3217 unsigned V1InOrder = 0; 3218 unsigned V1FromV1 = 0; 3219 unsigned V2InOrder = 0; 3220 unsigned V2FromV2 = 0; 3221 SmallVector<SDOperand, 8> V1Elts; 3222 SmallVector<SDOperand, 8> V2Elts; 3223 for (unsigned i = 0; i < 8; ++i) { 3224 SDOperand Elt = MaskElts[i]; 3225 if (Elt.getOpcode() == ISD::UNDEF) { 3226 V1Elts.push_back(Elt); 3227 V2Elts.push_back(Elt); 3228 ++V1InOrder; 3229 ++V2InOrder; 3230 continue; 3231 } 3232 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3233 if (EltIdx == i) { 3234 V1Elts.push_back(Elt); 3235 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3236 ++V1InOrder; 3237 } else if (EltIdx == i+8) { 3238 V1Elts.push_back(Elt); 3239 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3240 ++V2InOrder; 3241 } else if (EltIdx < 8) { 3242 V1Elts.push_back(Elt); 3243 ++V1FromV1; 3244 } else { 3245 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3246 ++V2FromV2; 3247 } 3248 } 3249 3250 if (V2InOrder > V1InOrder) { 3251 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3252 std::swap(V1, V2); 3253 std::swap(V1Elts, V2Elts); 3254 std::swap(V1FromV1, V2FromV2); 3255 } 3256 3257 if ((V1FromV1 + V1InOrder) != 8) { 3258 // Some elements are from V2.
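// e.g. for the mask <0, 9, 2, 11, 4, 13, 6, 15>, the V1 elements are already in place (V1FromV1 == 0), so the code below only extracts V2 elements 1, 3, 5 and 7 with pextrw and inserts them into the odd lanes with pinsrw.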
3259 if (V1FromV1) { 3260 // If there are elements that are from V1 but out of place, 3261 // then sort them in place first. 3262 SmallVector<SDOperand, 8> MaskVec; 3263 for (unsigned i = 0; i < 8; ++i) { 3264 SDOperand Elt = V1Elts[i]; 3265 if (Elt.getOpcode() == ISD::UNDEF) { 3266 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3267 continue; 3268 } 3269 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3270 if (EltIdx >= 8) 3271 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3272 else 3273 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3274 } 3275 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3276 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3277 } 3278 3279 NewV = V1; 3280 for (unsigned i = 0; i < 8; ++i) { 3281 SDOperand Elt = V1Elts[i]; 3282 if (Elt.getOpcode() == ISD::UNDEF) 3283 continue; 3284 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3285 if (EltIdx < 8) 3286 continue; 3287 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3288 DAG.getConstant(EltIdx - 8, PtrVT)); 3289 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3290 DAG.getConstant(i, PtrVT)); 3291 } 3292 return NewV; 3293 } else { 3294 // All elements are from V1. 3295 NewV = V1; 3296 for (unsigned i = 0; i < 8; ++i) { 3297 SDOperand Elt = V1Elts[i]; 3298 if (Elt.getOpcode() == ISD::UNDEF) 3299 continue; 3300 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3301 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3302 DAG.getConstant(EltIdx, PtrVT)); 3303 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3304 DAG.getConstant(i, PtrVT)); 3305 } 3306 return NewV; 3307 } 3308} 3309 3310/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3311/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 3312/// done when every pair / quad of shuffle mask elements point to elements in 3313/// the right sequence. e.g. 3314/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3315static 3316SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3317 MVT::ValueType VT, 3318 SDOperand PermMask, SelectionDAG &DAG, 3319 TargetLowering &TLI) { 3320 unsigned NumElems = PermMask.getNumOperands(); 3321 unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3322 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3323 MVT::ValueType NewVT = MaskVT; 3324 switch (VT) { 3325 case MVT::v4f32: NewVT = MVT::v2f64; break; 3326 case MVT::v4i32: NewVT = MVT::v2i64; break; 3327 case MVT::v8i16: NewVT = MVT::v4i32; break; 3328 case MVT::v16i8: NewVT = MVT::v4i32; break; 3329 default: assert(false && "Unexpected!"); 3330 } 3331 3332 if (NewWidth == 2) 3333 if (MVT::isInteger(VT)) 3334 NewVT = MVT::v2i64; 3335 else 3336 NewVT = MVT::v2f64; 3337 unsigned Scale = NumElems / NewWidth; 3338 SmallVector<SDOperand, 8> MaskVec; 3339 for (unsigned i = 0; i < NumElems; i += Scale) { 3340 unsigned StartIdx = ~0U; 3341 for (unsigned j = 0; j < Scale; ++j) { 3342 SDOperand Elt = PermMask.getOperand(i+j); 3343 if (Elt.getOpcode() == ISD::UNDEF) 3344 continue; 3345 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3346 if (StartIdx == ~0U) 3347 StartIdx = EltIdx - (EltIdx % Scale); 3348 if (EltIdx != StartIdx + j) 3349 return SDOperand(); 3350 } 3351 if (StartIdx == ~0U) 3352 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3353 else 3354 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3355 } 3356 3357 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3358 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3359 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3360 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3361 &MaskVec[0], MaskVec.size())); 3362} 3363 3364SDOperand 3365X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3366 SDOperand V1 = Op.getOperand(0); 3367 SDOperand V2 = Op.getOperand(1); 3368 SDOperand PermMask = Op.getOperand(2); 3369 MVT::ValueType VT = Op.getValueType(); 3370 unsigned NumElems = PermMask.getNumOperands(); 3371 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3372 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3373 bool V1IsSplat = false; 3374 bool V2IsSplat = false; 3375 3376 if (isUndefShuffle(Op.Val)) 3377 return DAG.getNode(ISD::UNDEF, VT); 3378 3379 if (isZeroShuffle(Op.Val)) 3380 return getZeroVector(VT, DAG); 3381 3382 if (isIdentityMask(PermMask.Val)) 3383 return V1; 3384 else if (isIdentityMask(PermMask.Val, true)) 3385 return V2; 3386 3387 if (isSplatMask(PermMask.Val)) { 3388 if (NumElems <= 4) return Op; 3389 // Promote it to a v4i32 splat. 3390 return PromoteSplat(Op, DAG); 3391 } 3392 3393 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3394 // do it! 3395 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3396 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3397 if (NewOp.Val) 3398 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3399 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3400 // FIXME: Figure out a cleaner way to do this. 3401 // Try to make use of movq to zero out the top part. 
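// e.g. a v4i32 shuffle of an all-zeros V1 with V2 using mask <4, 5, 2, 3> narrows to the v2i64 mask <2, 1>, which isMOVLMask accepts; the result is a single movq that moves V2's low qword and zeros the upper half.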
3402 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3403 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3404 if (NewOp.Val) { 3405 SDOperand NewV1 = NewOp.getOperand(0); 3406 SDOperand NewV2 = NewOp.getOperand(1); 3407 SDOperand NewMask = NewOp.getOperand(2); 3408 if (isCommutedMOVL(NewMask.Val, true, false)) { 3409 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3410 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3411 NewV1, NewV2, getMOVLMask(2, DAG)); 3412 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3413 } 3414 } 3415 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3416 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3417 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3418 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3419 } 3420 } 3421 3422 if (X86::isMOVLMask(PermMask.Val)) 3423 return (V1IsUndef) ? V2 : Op; 3424 3425 if (X86::isMOVSHDUPMask(PermMask.Val) || 3426 X86::isMOVSLDUPMask(PermMask.Val) || 3427 X86::isMOVHLPSMask(PermMask.Val) || 3428 X86::isMOVHPMask(PermMask.Val) || 3429 X86::isMOVLPMask(PermMask.Val)) 3430 return Op; 3431 3432 if (ShouldXformToMOVHLPS(PermMask.Val) || 3433 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3434 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3435 3436 bool Commuted = false; 3437 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3438 // 1,1,1,1 -> v8i16 though. 3439 V1IsSplat = isSplatVector(V1.Val); 3440 V2IsSplat = isSplatVector(V2.Val); 3441 3442 // Canonicalize the splat or undef, if present, to be on the RHS. 3443 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3444 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3445 std::swap(V1IsSplat, V2IsSplat); 3446 std::swap(V1IsUndef, V2IsUndef); 3447 Commuted = true; 3448 } 3449 3450 // FIXME: Figure out a cleaner way to do this. 3451 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3452 if (V2IsUndef) return V1; 3453 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3454 if (V2IsSplat) { 3455 // V2 is a splat, so the mask may be malformed. That is, it may point 3456 // to any V2 element. The instruction selector won't like this. Get 3457 // a corrected mask and commute to form a proper MOVS{S|D}. 3458 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3459 if (NewMask.Val != PermMask.Val) 3460 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3461 } 3462 return Op; 3463 } 3464 3465 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3466 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3467 X86::isUNPCKLMask(PermMask.Val) || 3468 X86::isUNPCKHMask(PermMask.Val)) 3469 return Op; 3470 3471 if (V2IsSplat) { 3472 // Normalize the mask so all entries that point to V2 point to its first 3473 // element, then try to match unpck{h|l} again. If it matches, return a 3474 // new vector_shuffle with the corrected mask.
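// e.g. with V2 a splat, the mask <0, 6, 1, 7> is equivalent to the unpckl mask <0, 4, 1, 5>; NormalizeMask below rewrites the V2 entries so the unpck pattern is recognized and the corrected mask is emitted.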
3475 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3476 if (NewMask.Val != PermMask.Val) { 3477 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3478 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3479 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3480 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3481 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3482 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3483 } 3484 } 3485 } 3486 3487 // Normalize the node to match x86 shuffle ops if needed 3488 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3489 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3490 3491 if (Commuted) { 3492 // Commute it back and try unpck* again. 3493 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3494 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3495 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3496 X86::isUNPCKLMask(PermMask.Val) || 3497 X86::isUNPCKHMask(PermMask.Val)) 3498 return Op; 3499 } 3500 3501 // If VT is integer, try PSHUF* first, then SHUFP*. 3502 if (MVT::isInteger(VT)) { 3503 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3504 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3505 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3506 X86::isPSHUFDMask(PermMask.Val)) || 3507 X86::isPSHUFHWMask(PermMask.Val) || 3508 X86::isPSHUFLWMask(PermMask.Val)) { 3509 if (V2.getOpcode() != ISD::UNDEF) 3510 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3511 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3512 return Op; 3513 } 3514 3515 if (X86::isSHUFPMask(PermMask.Val) && 3516 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3517 return Op; 3518 } else { 3519 // Floating point cases in the other order. 3520 if (X86::isSHUFPMask(PermMask.Val)) 3521 return Op; 3522 if (X86::isPSHUFDMask(PermMask.Val) || 3523 X86::isPSHUFHWMask(PermMask.Val) || 3524 X86::isPSHUFLWMask(PermMask.Val)) { 3525 if (V2.getOpcode() != ISD::UNDEF) 3526 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3527 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3528 return Op; 3529 } 3530 } 3531 3532 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 3533 if (VT == MVT::v8i16) { 3534 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3535 if (NewOp.Val) 3536 return NewOp; 3537 } 3538 3539 // Handle all 4 wide cases with a number of shuffles. 3540 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3541 // Don't do this for MMX. 3542 MVT::ValueType MaskVT = PermMask.getValueType(); 3543 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3544 SmallVector<std::pair<int, int>, 8> Locs; 3545 Locs.reserve(NumElems); 3546 SmallVector<SDOperand, 8> Mask1(NumElems, 3547 DAG.getNode(ISD::UNDEF, MaskEVT)); 3548 SmallVector<SDOperand, 8> Mask2(NumElems, 3549 DAG.getNode(ISD::UNDEF, MaskEVT)); 3550 unsigned NumHi = 0; 3551 unsigned NumLo = 0; 3552 // If no more than two elements come from either vector, this can be 3553 // implemented with two shuffles. The first shuffle gathers the elements; 3554 // the second shuffle, which takes the first shuffle as both of its 3555 // vector operands, puts the elements into the right order.
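// e.g. for the mask <2, 5, 0, 7>, the first shuffle produces <V1[2], V1[0], V2[1], V2[3]> and the second shuffle reorders that to the requested <V1[2], V2[1], V1[0], V2[3]>.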
3556 for (unsigned i = 0; i != NumElems; ++i) { 3557 SDOperand Elt = PermMask.getOperand(i); 3558 if (Elt.getOpcode() == ISD::UNDEF) { 3559 Locs[i] = std::make_pair(-1, -1); 3560 } else { 3561 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3562 if (Val < NumElems) { 3563 Locs[i] = std::make_pair(0, NumLo); 3564 Mask1[NumLo] = Elt; 3565 NumLo++; 3566 } else { 3567 Locs[i] = std::make_pair(1, NumHi); 3568 if (2+NumHi < NumElems) 3569 Mask1[2+NumHi] = Elt; 3570 NumHi++; 3571 } 3572 } 3573 } 3574 if (NumLo <= 2 && NumHi <= 2) { 3575 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3576 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3577 &Mask1[0], Mask1.size())); 3578 for (unsigned i = 0; i != NumElems; ++i) { 3579 if (Locs[i].first == -1) 3580 continue; 3581 else { 3582 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3583 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3584 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3585 } 3586 } 3587 3588 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3589 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3590 &Mask2[0], Mask2.size())); 3591 } 3592 3593 // Break it into (shuffle shuffle_hi, shuffle_lo). 3594 Locs.clear(); 3595 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3596 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3597 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3598 unsigned MaskIdx = 0; 3599 unsigned LoIdx = 0; 3600 unsigned HiIdx = NumElems/2; 3601 for (unsigned i = 0; i != NumElems; ++i) { 3602 if (i == NumElems/2) { 3603 MaskPtr = &HiMask; 3604 MaskIdx = 1; 3605 LoIdx = 0; 3606 HiIdx = NumElems/2; 3607 } 3608 SDOperand Elt = PermMask.getOperand(i); 3609 if (Elt.getOpcode() == ISD::UNDEF) { 3610 Locs[i] = std::make_pair(-1, -1); 3611 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3612 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3613 (*MaskPtr)[LoIdx] = Elt; 3614 LoIdx++; 3615 } else { 3616 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3617 (*MaskPtr)[HiIdx] = Elt; 3618 HiIdx++; 3619 } 3620 } 3621 3622 SDOperand LoShuffle = 3623 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3624 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3625 &LoMask[0], LoMask.size())); 3626 SDOperand HiShuffle = 3627 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3628 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3629 &HiMask[0], HiMask.size())); 3630 SmallVector<SDOperand, 8> MaskOps; 3631 for (unsigned i = 0; i != NumElems; ++i) { 3632 if (Locs[i].first == -1) { 3633 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3634 } else { 3635 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3636 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3637 } 3638 } 3639 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3640 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3641 &MaskOps[0], MaskOps.size())); 3642 } 3643 3644 return SDOperand(); 3645} 3646 3647SDOperand 3648X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3649 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3650 return SDOperand(); 3651 3652 MVT::ValueType VT = Op.getValueType(); 3653 // TODO: handle v16i8. 3654 if (MVT::getSizeInBits(VT) == 16) { 3655 SDOperand Vec = Op.getOperand(0); 3656 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3657 if (Idx == 0) 3658 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3659 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3660 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3661 Op.getOperand(1))); 3662 // Transform it so it matches pextrw which produces a 32-bit result.
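// i.e. (i16 (extract_elt V, Idx)) becomes (i16 (trunc (assertzext (PEXTRW V, Idx)))) below.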
3663 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3664 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3665 Op.getOperand(0), Op.getOperand(1)); 3666 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3667 DAG.getValueType(VT)); 3668 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3669 } else if (MVT::getSizeInBits(VT) == 32) { 3670 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3671 if (Idx == 0) 3672 return Op; 3673 // SHUFPS the element to the lowest double word, then movss. 3674 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3675 SmallVector<SDOperand, 8> IdxVec; 3676 IdxVec. 3677 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3678 IdxVec. 3679 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3680 IdxVec. 3681 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3682 IdxVec. 3683 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3684 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3685 &IdxVec[0], IdxVec.size()); 3686 SDOperand Vec = Op.getOperand(0); 3687 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3688 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3689 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3690 DAG.getIntPtrConstant(0)); 3691 } else if (MVT::getSizeInBits(VT) == 64) { 3692 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3693 if (Idx == 0) 3694 return Op; 3695 3696 // UNPCKHPD the element to the lowest double word, then movsd. 3697 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3698 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3699 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); 3700 SmallVector<SDOperand, 8> IdxVec; 3701 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3702 IdxVec. 3703 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3704 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3705 &IdxVec[0], IdxVec.size()); 3706 SDOperand Vec = Op.getOperand(0); 3707 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3708 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3709 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3710 DAG.getIntPtrConstant(0)); 3711 } 3712 3713 return SDOperand(); 3714} 3715 3716SDOperand 3717X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3718 MVT::ValueType VT = Op.getValueType(); 3719 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3720 if (EVT == MVT::i8) 3721 return SDOperand(); 3722 3723 SDOperand N0 = Op.getOperand(0); 3724 SDOperand N1 = Op.getOperand(1); 3725 SDOperand N2 = Op.getOperand(2); 3726 3727 if (MVT::getSizeInBits(EVT) == 16) { 3728 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32 3729 // as its second argument.
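// i.e. (insert_elt V, Val, Idx) becomes (PINSRW V, (any_extend Val), Idx) below.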
3730 if (N1.getValueType() != MVT::i32) 3731 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3732 if (N2.getValueType() != MVT::i32) 3733 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3734 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3735 } 3736 return SDOperand(); 3737} 3738 3739SDOperand 3740X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3741 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3742 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3743} 3744 3745// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3746// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is 3747// one of the above mentioned nodes. It has to be wrapped because otherwise 3748// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3749// be used to form addressing modes. These wrapped nodes will be selected 3750// into MOV32ri. 3751SDOperand 3752X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3753 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3754 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3755 getPointerTy(), 3756 CP->getAlignment()); 3757 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3758 // With PIC, the address is actually $g + Offset. 3759 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3760 !Subtarget->isPICStyleRIPRel()) { 3761 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3762 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3763 Result); 3764 } 3765 3766 return Result; 3767} 3768 3769SDOperand 3770X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3771 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3772 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3773 // If it's a debug information descriptor, don't mess with it. 3774 if (DAG.isVerifiedDebugInfoDesc(Op)) 3775 return Result; 3776 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3777 // With PIC, the address is actually $g + Offset. 3778 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3779 !Subtarget->isPICStyleRIPRel()) { 3780 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3781 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3782 Result); 3783 } 3784 3785 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3786 // load the value at address GV, not the value of GV itself. This means that 3787 // the GlobalAddress must be in the base or index register of the address, not 3788 // the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 3789 // The same applies for external symbols during PIC codegen 3790 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3791 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 3792 3793 return Result; 3794} 3795 3796// Lower ISD::GlobalTLSAddress using the "general dynamic" model 3797static SDOperand 3798LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3799 const MVT::ValueType PtrVT) { 3800 SDOperand InFlag; 3801 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 3802 DAG.getNode(X86ISD::GlobalBaseReg, 3803 PtrVT), InFlag); 3804 InFlag = Chain.getValue(1); 3805 3806 // emit leal symbol@TLSGD(,%ebx,1), %eax 3807 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3808 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3809 GA->getValueType(0), 3810 GA->getOffset()); 3811 SDOperand Ops[] = { Chain, TGA, InFlag }; 3812 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3813 InFlag = Result.getValue(2); 3814 Chain = Result.getValue(1); 3815 3816 // call ___tls_get_addr. This function receives its argument in 3817 // the register EAX. 3818 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3819 InFlag = Chain.getValue(1); 3820 3821 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3822 SDOperand Ops1[] = { Chain, 3823 DAG.getTargetExternalSymbol("___tls_get_addr", 3824 PtrVT), 3825 DAG.getRegister(X86::EAX, PtrVT), 3826 DAG.getRegister(X86::EBX, PtrVT), 3827 InFlag }; 3828 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3829 InFlag = Chain.getValue(1); 3830 3831 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3832} 3833 3834// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 3835// "local exec" model. 3836static SDOperand 3837LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3838 const MVT::ValueType PtrVT) { 3839 // Get the Thread Pointer 3840 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 3841 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 3842 // exec) 3843 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3844 GA->getValueType(0), 3845 GA->getOffset()); 3846 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 3847 3848 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 3849 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0); 3850 3851 // The address of the thread local variable is the add of the thread 3852 // pointer with the offset of the variable. 
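// As a rough sketch (IA-32 local exec, assuming the usual %gs:0 thread pointer): movl %gs:0, %eax ; addl x@ntpoff, %eax — the exact sequence is chosen later by instruction selection.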
3853 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 3854} 3855 3856SDOperand 3857X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 3858 // TODO: implement the "local dynamic" model 3859 // TODO: implement the "initial exec" model for PIC executables 3860 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 3861 "TLS not implemented for non-ELF and 64-bit targets"); 3862 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3863 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 3864 // otherwise use the "Local Exec" TLS Model 3865 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 3866 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 3867 else 3868 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 3869} 3870 3871SDOperand 3872X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3873 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3874 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3875 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3876 // With PIC, the address is actually $g + Offset. 3877 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3878 !Subtarget->isPICStyleRIPRel()) { 3879 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3880 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3881 Result); 3882 } 3883 3884 return Result; 3885} 3886 3887SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 3888 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3889 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 3890 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3891 // With PIC, the address is actually $g + Offset. 3892 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3893 !Subtarget->isPICStyleRIPRel()) { 3894 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3895 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3896 Result); 3897 } 3898 3899 return Result; 3900} 3901 3902/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 3903/// take a 2 x i32 value to shift plus a shift amount. 3904SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3905 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3906 "Not an i64 shift!"); 3907 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3908 SDOperand ShOpLo = Op.getOperand(0); 3909 SDOperand ShOpHi = Op.getOperand(1); 3910 SDOperand ShAmt = Op.getOperand(2); 3911 SDOperand Tmp1 = isSRA ? 3912 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3913 DAG.getConstant(0, MVT::i32); 3914 3915 SDOperand Tmp2, Tmp3; 3916 if (Op.getOpcode() == ISD::SHL_PARTS) { 3917 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3918 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3919 } else { 3920 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3921 Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3922 } 3923 3924 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3925 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3926 DAG.getConstant(32, MVT::i8)); 3927 SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, 3928 AndNode, DAG.getConstant(0, MVT::i8)); 3929 3930 SDOperand Hi, Lo; 3931 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3932 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3933 SmallVector<SDOperand, 4> Ops; 3934 if (Op.getOpcode() == ISD::SHL_PARTS) { 3935 Ops.push_back(Tmp2); 3936 Ops.push_back(Tmp3); 3937 Ops.push_back(CC); 3938 Ops.push_back(Cond); 3939 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3940 3941 Ops.clear(); 3942 Ops.push_back(Tmp3); 3943 Ops.push_back(Tmp1); 3944 Ops.push_back(CC); 3945 Ops.push_back(Cond); 3946 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3947 } else { 3948 Ops.push_back(Tmp2); 3949 Ops.push_back(Tmp3); 3950 Ops.push_back(CC); 3951 Ops.push_back(Cond); 3952 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3953 3954 Ops.clear(); 3955 Ops.push_back(Tmp3); 3956 Ops.push_back(Tmp1); 3957 Ops.push_back(CC); 3958 Ops.push_back(Cond); 3959 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3960 } 3961 3962 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3963 Ops.clear(); 3964 Ops.push_back(Lo); 3965 Ops.push_back(Hi); 3966 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3967} 3968 3969SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3970 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3971 Op.getOperand(0).getValueType() >= MVT::i16 && 3972 "Unknown SINT_TO_FP to lower!"); 3973 3974 SDOperand Result; 3975 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3976 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3977 MachineFunction &MF = DAG.getMachineFunction(); 3978 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3979 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3980 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3981 StackSlot, NULL, 0); 3982 3983 // These are really Legal; caller falls through into that case. 3984 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 3985 return Result; 3986 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 3987 Subtarget->is64Bit()) 3988 return Result; 3989 3990 // Build the FILD 3991 SDVTList Tys; 3992 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 3993 if (useSSE) 3994 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 3995 else 3996 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 3997 SmallVector<SDOperand, 8> Ops; 3998 Ops.push_back(Chain); 3999 Ops.push_back(StackSlot); 4000 Ops.push_back(DAG.getValueType(SrcVT)); 4001 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 4002 Tys, &Ops[0], Ops.size()); 4003 4004 if (useSSE) { 4005 Chain = Result.getValue(1); 4006 SDOperand InFlag = Result.getValue(2); 4007 4008 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4009 // shouldn't be necessary except that RFP cannot be live across 4010 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
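// Concretely, the FST below spills the x87 FILD result to a fresh 8-byte stack slot, and the following load re-materializes it as an SSE value.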
4011 MachineFunction &MF = DAG.getMachineFunction(); 4012 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4013 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4014 Tys = DAG.getVTList(MVT::Other); 4015 SmallVector<SDOperand, 8> Ops; 4016 Ops.push_back(Chain); 4017 Ops.push_back(Result); 4018 Ops.push_back(StackSlot); 4019 Ops.push_back(DAG.getValueType(Op.getValueType())); 4020 Ops.push_back(InFlag); 4021 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4022 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 4023 } 4024 4025 return Result; 4026} 4027 4028std::pair<SDOperand,SDOperand> X86TargetLowering:: 4029FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4030 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4031 "Unknown FP_TO_SINT to lower!"); 4032 4033 // These are really Legal. 4034 if (Op.getValueType() == MVT::i32 && 4035 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4036 return std::make_pair(SDOperand(), SDOperand()); 4037 if (Subtarget->is64Bit() && 4038 Op.getValueType() == MVT::i64 && 4039 Op.getOperand(0).getValueType() != MVT::f80) 4040 return std::make_pair(SDOperand(), SDOperand()); 4041 4042 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4043 // stack slot. 4044 MachineFunction &MF = DAG.getMachineFunction(); 4045 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4046 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4047 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4048 unsigned Opc; 4049 switch (Op.getValueType()) { 4050 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4051 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4052 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4053 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4054 } 4055 4056 SDOperand Chain = DAG.getEntryNode(); 4057 SDOperand Value = Op.getOperand(0); 4058 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4059 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4060 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 4061 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4062 SDOperand Ops[] = { 4063 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4064 }; 4065 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4066 Chain = Value.getValue(1); 4067 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4068 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4069 } 4070 4071 // Build the FP_TO_INT*_IN_MEM 4072 SDOperand Ops[] = { Chain, Value, StackSlot }; 4073 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4074 4075 return std::make_pair(FIST, StackSlot); 4076} 4077 4078SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4079 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4080 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4081 if (FIST.Val == 0) return SDOperand(); 4082 4083 // Load the result. 4084 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4085} 4086 4087SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4088 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4089 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4090 if (FIST.Val == 0) return 0; 4091 4092 // Return an i64 load from the stack slot. 
4093 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4094 4095 // Use a MERGE_VALUES node to drop the chain result value. 4096 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4097} 4098 4099SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4100 MVT::ValueType VT = Op.getValueType(); 4101 MVT::ValueType EltVT = VT; 4102 if (MVT::isVector(VT)) 4103 EltVT = MVT::getVectorElementType(VT); 4104 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4105 std::vector<Constant*> CV; 4106 if (EltVT == MVT::f64) { 4107 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4108 CV.push_back(C); 4109 CV.push_back(C); 4110 } else { 4111 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4112 CV.push_back(C); 4113 CV.push_back(C); 4114 CV.push_back(C); 4115 CV.push_back(C); 4116 } 4117 Constant *C = ConstantVector::get(CV); 4118 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4119 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4120 false, 16); 4121 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4122} 4123 4124SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4125 MVT::ValueType VT = Op.getValueType(); 4126 MVT::ValueType EltVT = VT; 4127 unsigned EltNum = 1; 4128 if (MVT::isVector(VT)) { 4129 EltVT = MVT::getVectorElementType(VT); 4130 EltNum = MVT::getVectorNumElements(VT); 4131 } 4132 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4133 std::vector<Constant*> CV; 4134 if (EltVT == MVT::f64) { 4135 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4136 CV.push_back(C); 4137 CV.push_back(C); 4138 } else { 4139 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4140 CV.push_back(C); 4141 CV.push_back(C); 4142 CV.push_back(C); 4143 CV.push_back(C); 4144 } 4145 Constant *C = ConstantVector::get(CV); 4146 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4147 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4148 false, 16); 4149 if (MVT::isVector(VT)) { 4150 return DAG.getNode(ISD::BIT_CONVERT, VT, 4151 DAG.getNode(ISD::XOR, MVT::v2i64, 4152 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4153 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4154 } else { 4155 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4156 } 4157} 4158 4159SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4160 SDOperand Op0 = Op.getOperand(0); 4161 SDOperand Op1 = Op.getOperand(1); 4162 MVT::ValueType VT = Op.getValueType(); 4163 MVT::ValueType SrcVT = Op1.getValueType(); 4164 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4165 4166 // If second operand is smaller, extend it first. 4167 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4168 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4169 SrcVT = VT; 4170 SrcTy = MVT::getTypeForValueType(SrcVT); 4171 } 4172 // And if it is bigger, shrink it first. 4173 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4174 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4175 SrcVT = VT; 4176 SrcTy = MVT::getTypeForValueType(SrcVT); 4177 } 4178 4179 // At this point the operands and the result should have the same 4180 // type, and that won't be f80 since that is not custom lowered. 4181 4182 // First get the sign bit of second operand. 
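// The constant-pool value built below is a sign-bit mask: <0x8000000000000000, 0> for f64, or <0x80000000, 0, 0, 0> for f32; ANDing Op1 with it isolates the sign bit.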
4183 std::vector<Constant*> CV; 4184 if (SrcVT == MVT::f64) { 4185 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4186 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4187 } else { 4188 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4189 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4190 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4191 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4192 } 4193 Constant *C = ConstantVector::get(CV); 4194 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4195 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0, 4196 false, 16); 4197 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4198 4199 // Shift sign bit right or left if the two operands have different types. 4200 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4201 // Op0 is MVT::f32, Op1 is MVT::f64. 4202 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4203 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4204 DAG.getConstant(32, MVT::i32)); 4205 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4206 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4207 DAG.getIntPtrConstant(0)); 4208 } 4209 4210 // Clear first operand sign bit. 4211 CV.clear(); 4212 if (VT == MVT::f64) { 4213 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4214 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4215 } else { 4216 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4217 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4218 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4219 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4220 } 4221 C = ConstantVector::get(CV); 4222 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4223 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4224 false, 16); 4225 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4226 4227 // Or the value with the sign bit. 
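// i.e. copysign(Op0, Op1) == (Op0 & ~signmask) | (Op1 & signmask), computed with FAND and FOR on the two constant-pool masks.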
4228 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4229} 4230 4231SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4232 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4233 SDOperand Cond; 4234 SDOperand Op0 = Op.getOperand(0); 4235 SDOperand Op1 = Op.getOperand(1); 4236 SDOperand CC = Op.getOperand(2); 4237 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4238 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4239 unsigned X86CC; 4240 4241 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4242 Op0, Op1, DAG)) { 4243 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4244 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4245 DAG.getConstant(X86CC, MVT::i8), Cond); 4246 } 4247 4248 assert(isFP && "Illegal integer SetCC!"); 4249 4250 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4251 switch (SetCCOpcode) { 4252 default: assert(false && "Illegal floating point SetCC!"); 4253 case ISD::SETOEQ: { // !PF & ZF 4254 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4255 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4256 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4257 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4258 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4259 } 4260 case ISD::SETUNE: { // PF | !ZF 4261 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4262 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4263 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4264 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4265 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4266 } 4267 } 4268} 4269 4270 4271SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4272 bool addTest = true; 4273 SDOperand Cond = Op.getOperand(0); 4274 SDOperand CC; 4275 4276 if (Cond.getOpcode() == ISD::SETCC) 4277 Cond = LowerSETCC(Cond, DAG); 4278 4279 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4280 // setting operand in place of the X86ISD::SETCC. 4281 if (Cond.getOpcode() == X86ISD::SETCC) { 4282 CC = Cond.getOperand(0); 4283 4284 SDOperand Cmp = Cond.getOperand(1); 4285 unsigned Opc = Cmp.getOpcode(); 4286 MVT::ValueType VT = Op.getValueType(); 4287 4288 bool IllegalFPCMov = false; 4289 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4290 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4291 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4292 4293 if ((Opc == X86ISD::CMP || 4294 Opc == X86ISD::COMI || 4295 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4296 Cond = Cmp; 4297 addTest = false; 4298 } 4299 } 4300 4301 if (addTest) { 4302 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4303 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4304 } 4305 4306 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4307 MVT::Flag); 4308 SmallVector<SDOperand, 4> Ops; 4309 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4310 // condition is true. 
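// e.g. with the default NE test added above, (select cond, T, F) becomes CMOV <F, T, COND_NE, (CMP cond, 0)>: operand 1 (T) is chosen when the condition holds.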
4311 Ops.push_back(Op.getOperand(2)); 4312 Ops.push_back(Op.getOperand(1)); 4313 Ops.push_back(CC); 4314 Ops.push_back(Cond); 4315 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4316} 4317 4318SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4319 bool addTest = true; 4320 SDOperand Chain = Op.getOperand(0); 4321 SDOperand Cond = Op.getOperand(1); 4322 SDOperand Dest = Op.getOperand(2); 4323 SDOperand CC; 4324 4325 if (Cond.getOpcode() == ISD::SETCC) 4326 Cond = LowerSETCC(Cond, DAG); 4327 4328 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4329 // setting operand in place of the X86ISD::SETCC. 4330 if (Cond.getOpcode() == X86ISD::SETCC) { 4331 CC = Cond.getOperand(0); 4332 4333 SDOperand Cmp = Cond.getOperand(1); 4334 unsigned Opc = Cmp.getOpcode(); 4335 if (Opc == X86ISD::CMP || 4336 Opc == X86ISD::COMI || 4337 Opc == X86ISD::UCOMI) { 4338 Cond = Cmp; 4339 addTest = false; 4340 } 4341 } 4342 4343 if (addTest) { 4344 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4345 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4346 } 4347 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4348 Chain, Op.getOperand(2), CC, Cond); 4349} 4350 4351 4352// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4353// Calls to _alloca are needed to probe the stack when allocating more than 4k 4354// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4355// that the guard pages used by the OS virtual memory manager are allocated in 4356// correct sequence. 4357SDOperand 4358X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4359 SelectionDAG &DAG) { 4360 assert(Subtarget->isTargetCygMing() && 4361 "This should be used only on Cygwin/Mingw targets"); 4362 4363 // Get the inputs. 4364 SDOperand Chain = Op.getOperand(0); 4365 SDOperand Size = Op.getOperand(1); 4366 // FIXME: Ensure alignment here 4367 4368 SDOperand Flag; 4369 4370 MVT::ValueType IntPtr = getPointerTy(); 4371 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 4372 4373 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4374 Flag = Chain.getValue(1); 4375 4376 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4377 SDOperand Ops[] = { Chain, 4378 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4379 DAG.getRegister(X86::EAX, IntPtr), 4380 Flag }; 4381 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4382 Flag = Chain.getValue(1); 4383 4384 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4385 4386 std::vector<MVT::ValueType> Tys; 4387 Tys.push_back(SPTy); 4388 Tys.push_back(MVT::Other); 4389 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4390 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4391} 4392 4393SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4394 SDOperand InFlag(0, 0); 4395 SDOperand Chain = Op.getOperand(0); 4396 unsigned Align = 4397 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4398 if (Align == 0) Align = 1; 4399 4400 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4401 // If not DWORD aligned or size is more than the threshold, call memset. 4402 // The libc version is likely to be faster for these cases. It can use the 4403 // address value and run time information about the CPU.
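// For example, memset(p, 0xAB, 16) with p DWORD aligned expands below to roughly: mov $0xABABABAB, %eax ; mov $4, %ecx ; rep stosl — with no byte tail, since 16 is a multiple of 4.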
  if ((Align & 3) != 0 ||
      (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = Op.getOperand(1);
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    // Extend the unsigned i8 argument to be an int value for the call.
    Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    Entry.Node = Op.getOperand(3);
    Args.push_back(Entry);
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = Op.getOperand(3);
      break;
    }

    if (AVT > MVT::i8) {
      if (I) {
        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
        Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
        BytesLeft = I->getValue() % UBytes;
      } else {
        assert(AVT >= MVT::i32 &&
               "Do not use rep;stos if not at least DWORD aligned");
        // Divide the byte count by the store unit size (8 bytes for
        // rep;stosq, 4 for rep;stosd), matching the AVT-dependent remainder
        // mask used for the second rep;stos below.
        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                            Op.getOperand(3),
                            DAG.getConstant((AVT == MVT::i64) ? 3 : 2,
                                            MVT::i8));
        TwoRepStos = true;
      }
    }

    Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                              DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
    Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4504 Left, InFlag); 4505 InFlag = Chain.getValue(1); 4506 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4507 Ops.clear(); 4508 Ops.push_back(Chain); 4509 Ops.push_back(DAG.getValueType(MVT::i8)); 4510 Ops.push_back(InFlag); 4511 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4512 } else if (BytesLeft) { 4513 // Issue stores for the last 1 - 7 bytes. 4514 SDOperand Value; 4515 unsigned Val = ValC->getValue() & 255; 4516 unsigned Offset = I->getValue() - BytesLeft; 4517 SDOperand DstAddr = Op.getOperand(1); 4518 MVT::ValueType AddrVT = DstAddr.getValueType(); 4519 if (BytesLeft >= 4) { 4520 Val = (Val << 8) | Val; 4521 Val = (Val << 16) | Val; 4522 Value = DAG.getConstant(Val, MVT::i32); 4523 Chain = DAG.getStore(Chain, Value, 4524 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4525 DAG.getConstant(Offset, AddrVT)), 4526 NULL, 0); 4527 BytesLeft -= 4; 4528 Offset += 4; 4529 } 4530 if (BytesLeft >= 2) { 4531 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4532 Chain = DAG.getStore(Chain, Value, 4533 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4534 DAG.getConstant(Offset, AddrVT)), 4535 NULL, 0); 4536 BytesLeft -= 2; 4537 Offset += 2; 4538 } 4539 if (BytesLeft == 1) { 4540 Value = DAG.getConstant(Val, MVT::i8); 4541 Chain = DAG.getStore(Chain, Value, 4542 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4543 DAG.getConstant(Offset, AddrVT)), 4544 NULL, 0); 4545 } 4546 } 4547 4548 return Chain; 4549} 4550 4551SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4552 SDOperand Dest, 4553 SDOperand Source, 4554 unsigned Size, 4555 unsigned Align, 4556 SelectionDAG &DAG) { 4557 MVT::ValueType AVT; 4558 unsigned BytesLeft = 0; 4559 switch (Align & 3) { 4560 case 2: // WORD aligned 4561 AVT = MVT::i16; 4562 break; 4563 case 0: // DWORD aligned 4564 AVT = MVT::i32; 4565 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4566 AVT = MVT::i64; 4567 break; 4568 default: // Byte aligned 4569 AVT = MVT::i8; 4570 break; 4571 } 4572 4573 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4574 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4575 BytesLeft = Size % UBytes; 4576 4577 SDOperand InFlag(0, 0); 4578 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4579 Count, InFlag); 4580 InFlag = Chain.getValue(1); 4581 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4582 Dest, InFlag); 4583 InFlag = Chain.getValue(1); 4584 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4585 Source, InFlag); 4586 InFlag = Chain.getValue(1); 4587 4588 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4589 SmallVector<SDOperand, 8> Ops; 4590 Ops.push_back(Chain); 4591 Ops.push_back(DAG.getValueType(AVT)); 4592 Ops.push_back(InFlag); 4593 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4594 4595 if (BytesLeft) { 4596 // Issue loads and stores for the last 1 - 7 bytes. 
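    // Worked example (illustrative): copying 15 DWORD-aligned bytes uses
    // AVT = i32, so rep;movs runs with a count of 3 and BytesLeft = 3; the
    // code below then finishes the job with one i16 and one i8 load/store
    // pair at offsets 12 and 14.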
4597 unsigned Offset = Size - BytesLeft; 4598 SDOperand DstAddr = Dest; 4599 MVT::ValueType DstVT = DstAddr.getValueType(); 4600 SDOperand SrcAddr = Source; 4601 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4602 SDOperand Value; 4603 if (BytesLeft >= 4) { 4604 Value = DAG.getLoad(MVT::i32, Chain, 4605 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4606 DAG.getConstant(Offset, SrcVT)), 4607 NULL, 0); 4608 Chain = Value.getValue(1); 4609 Chain = DAG.getStore(Chain, Value, 4610 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4611 DAG.getConstant(Offset, DstVT)), 4612 NULL, 0); 4613 BytesLeft -= 4; 4614 Offset += 4; 4615 } 4616 if (BytesLeft >= 2) { 4617 Value = DAG.getLoad(MVT::i16, Chain, 4618 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4619 DAG.getConstant(Offset, SrcVT)), 4620 NULL, 0); 4621 Chain = Value.getValue(1); 4622 Chain = DAG.getStore(Chain, Value, 4623 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4624 DAG.getConstant(Offset, DstVT)), 4625 NULL, 0); 4626 BytesLeft -= 2; 4627 Offset += 2; 4628 } 4629 4630 if (BytesLeft == 1) { 4631 Value = DAG.getLoad(MVT::i8, Chain, 4632 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4633 DAG.getConstant(Offset, SrcVT)), 4634 NULL, 0); 4635 Chain = Value.getValue(1); 4636 Chain = DAG.getStore(Chain, Value, 4637 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4638 DAG.getConstant(Offset, DstVT)), 4639 NULL, 0); 4640 } 4641 } 4642 4643 return Chain; 4644} 4645 4646/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4647SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4648 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4649 SDOperand TheChain = N->getOperand(0); 4650 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4651 if (Subtarget->is64Bit()) { 4652 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4653 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4654 MVT::i64, rax.getValue(2)); 4655 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4656 DAG.getConstant(32, MVT::i8)); 4657 SDOperand Ops[] = { 4658 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4659 }; 4660 4661 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4662 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4663 } 4664 4665 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4666 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4667 MVT::i32, eax.getValue(2)); 4668 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4669 SDOperand Ops[] = { eax, edx }; 4670 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4671 4672 // Use a MERGE_VALUES to return the value and chain. 4673 Ops[1] = edx.getValue(1); 4674 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4675 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4676} 4677 4678SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4679 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 4680 4681 if (!Subtarget->is64Bit()) { 4682 // vastart just stores the address of the VarArgsFrameIndex slot into the 4683 // memory location argument. 4684 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4685 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 4686 SV->getOffset()); 4687 } 4688 4689 // __va_list_tag: 4690 // gp_offset (0 - 6 * 8) 4691 // fp_offset (48 - 48 + 8 * 16) 4692 // overflow_arg_area (point to parameters coming in memory). 
4693 // reg_save_area 4694 SmallVector<SDOperand, 8> MemOps; 4695 SDOperand FIN = Op.getOperand(1); 4696 // Store gp_offset 4697 SDOperand Store = DAG.getStore(Op.getOperand(0), 4698 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4699 FIN, SV->getValue(), SV->getOffset()); 4700 MemOps.push_back(Store); 4701 4702 // Store fp_offset 4703 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4704 Store = DAG.getStore(Op.getOperand(0), 4705 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4706 FIN, SV->getValue(), SV->getOffset()); 4707 MemOps.push_back(Store); 4708 4709 // Store ptr to overflow_arg_area 4710 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4711 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4712 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 4713 SV->getOffset()); 4714 MemOps.push_back(Store); 4715 4716 // Store ptr to reg_save_area. 4717 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4718 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4719 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 4720 SV->getOffset()); 4721 MemOps.push_back(Store); 4722 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4723} 4724 4725SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4726 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4727 SDOperand Chain = Op.getOperand(0); 4728 SDOperand DstPtr = Op.getOperand(1); 4729 SDOperand SrcPtr = Op.getOperand(2); 4730 SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3)); 4731 SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4732 4733 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, 4734 SrcSV->getValue(), SrcSV->getOffset()); 4735 Chain = SrcPtr.getValue(1); 4736 for (unsigned i = 0; i < 3; ++i) { 4737 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, 4738 SrcSV->getValue(), SrcSV->getOffset()); 4739 Chain = Val.getValue(1); 4740 Chain = DAG.getStore(Chain, Val, DstPtr, 4741 DstSV->getValue(), DstSV->getOffset()); 4742 if (i == 2) 4743 break; 4744 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4745 DAG.getIntPtrConstant(8)); 4746 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4747 DAG.getIntPtrConstant(8)); 4748 } 4749 return Chain; 4750} 4751 4752SDOperand 4753X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4754 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4755 switch (IntNo) { 4756 default: return SDOperand(); // Don't custom lower most intrinsics. 4757 // Comparison intrinsics. 
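  // For example (an illustrative sketch), @llvm.x86.sse.comieq.ss becomes
  //   Cond  = X86ISD::COMI(lhs, rhs)
  //   SetCC = X86ISD::SETCC(COND_E, Cond)
  // any-extended to the i32 result, as built at the end of this case block.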
4758 case Intrinsic::x86_sse_comieq_ss: 4759 case Intrinsic::x86_sse_comilt_ss: 4760 case Intrinsic::x86_sse_comile_ss: 4761 case Intrinsic::x86_sse_comigt_ss: 4762 case Intrinsic::x86_sse_comige_ss: 4763 case Intrinsic::x86_sse_comineq_ss: 4764 case Intrinsic::x86_sse_ucomieq_ss: 4765 case Intrinsic::x86_sse_ucomilt_ss: 4766 case Intrinsic::x86_sse_ucomile_ss: 4767 case Intrinsic::x86_sse_ucomigt_ss: 4768 case Intrinsic::x86_sse_ucomige_ss: 4769 case Intrinsic::x86_sse_ucomineq_ss: 4770 case Intrinsic::x86_sse2_comieq_sd: 4771 case Intrinsic::x86_sse2_comilt_sd: 4772 case Intrinsic::x86_sse2_comile_sd: 4773 case Intrinsic::x86_sse2_comigt_sd: 4774 case Intrinsic::x86_sse2_comige_sd: 4775 case Intrinsic::x86_sse2_comineq_sd: 4776 case Intrinsic::x86_sse2_ucomieq_sd: 4777 case Intrinsic::x86_sse2_ucomilt_sd: 4778 case Intrinsic::x86_sse2_ucomile_sd: 4779 case Intrinsic::x86_sse2_ucomigt_sd: 4780 case Intrinsic::x86_sse2_ucomige_sd: 4781 case Intrinsic::x86_sse2_ucomineq_sd: { 4782 unsigned Opc = 0; 4783 ISD::CondCode CC = ISD::SETCC_INVALID; 4784 switch (IntNo) { 4785 default: break; 4786 case Intrinsic::x86_sse_comieq_ss: 4787 case Intrinsic::x86_sse2_comieq_sd: 4788 Opc = X86ISD::COMI; 4789 CC = ISD::SETEQ; 4790 break; 4791 case Intrinsic::x86_sse_comilt_ss: 4792 case Intrinsic::x86_sse2_comilt_sd: 4793 Opc = X86ISD::COMI; 4794 CC = ISD::SETLT; 4795 break; 4796 case Intrinsic::x86_sse_comile_ss: 4797 case Intrinsic::x86_sse2_comile_sd: 4798 Opc = X86ISD::COMI; 4799 CC = ISD::SETLE; 4800 break; 4801 case Intrinsic::x86_sse_comigt_ss: 4802 case Intrinsic::x86_sse2_comigt_sd: 4803 Opc = X86ISD::COMI; 4804 CC = ISD::SETGT; 4805 break; 4806 case Intrinsic::x86_sse_comige_ss: 4807 case Intrinsic::x86_sse2_comige_sd: 4808 Opc = X86ISD::COMI; 4809 CC = ISD::SETGE; 4810 break; 4811 case Intrinsic::x86_sse_comineq_ss: 4812 case Intrinsic::x86_sse2_comineq_sd: 4813 Opc = X86ISD::COMI; 4814 CC = ISD::SETNE; 4815 break; 4816 case Intrinsic::x86_sse_ucomieq_ss: 4817 case Intrinsic::x86_sse2_ucomieq_sd: 4818 Opc = X86ISD::UCOMI; 4819 CC = ISD::SETEQ; 4820 break; 4821 case Intrinsic::x86_sse_ucomilt_ss: 4822 case Intrinsic::x86_sse2_ucomilt_sd: 4823 Opc = X86ISD::UCOMI; 4824 CC = ISD::SETLT; 4825 break; 4826 case Intrinsic::x86_sse_ucomile_ss: 4827 case Intrinsic::x86_sse2_ucomile_sd: 4828 Opc = X86ISD::UCOMI; 4829 CC = ISD::SETLE; 4830 break; 4831 case Intrinsic::x86_sse_ucomigt_ss: 4832 case Intrinsic::x86_sse2_ucomigt_sd: 4833 Opc = X86ISD::UCOMI; 4834 CC = ISD::SETGT; 4835 break; 4836 case Intrinsic::x86_sse_ucomige_ss: 4837 case Intrinsic::x86_sse2_ucomige_sd: 4838 Opc = X86ISD::UCOMI; 4839 CC = ISD::SETGE; 4840 break; 4841 case Intrinsic::x86_sse_ucomineq_ss: 4842 case Intrinsic::x86_sse2_ucomineq_sd: 4843 Opc = X86ISD::UCOMI; 4844 CC = ISD::SETNE; 4845 break; 4846 } 4847 4848 unsigned X86CC; 4849 SDOperand LHS = Op.getOperand(1); 4850 SDOperand RHS = Op.getOperand(2); 4851 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4852 4853 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 4854 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 4855 DAG.getConstant(X86CC, MVT::i8), Cond); 4856 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4857 } 4858 } 4859} 4860 4861SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4862 // Depths > 0 not supported yet! 
4863 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4864 return SDOperand(); 4865 4866 // Just load the return address 4867 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4868 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4869} 4870 4871SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4872 // Depths > 0 not supported yet! 4873 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4874 return SDOperand(); 4875 4876 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4877 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4878 DAG.getIntPtrConstant(4)); 4879} 4880 4881SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 4882 SelectionDAG &DAG) { 4883 // Is not yet supported on x86-64 4884 if (Subtarget->is64Bit()) 4885 return SDOperand(); 4886 4887 return DAG.getIntPtrConstant(8); 4888} 4889 4890SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 4891{ 4892 assert(!Subtarget->is64Bit() && 4893 "Lowering of eh_return builtin is not supported yet on x86-64"); 4894 4895 MachineFunction &MF = DAG.getMachineFunction(); 4896 SDOperand Chain = Op.getOperand(0); 4897 SDOperand Offset = Op.getOperand(1); 4898 SDOperand Handler = Op.getOperand(2); 4899 4900 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 4901 getPointerTy()); 4902 4903 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 4904 DAG.getIntPtrConstant(-4UL)); 4905 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 4906 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 4907 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 4908 MF.getRegInfo().addLiveOut(X86::ECX); 4909 4910 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 4911 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 4912} 4913 4914SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 4915 SelectionDAG &DAG) { 4916 SDOperand Root = Op.getOperand(0); 4917 SDOperand Trmp = Op.getOperand(1); // trampoline 4918 SDOperand FPtr = Op.getOperand(2); // nested function 4919 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 4920 4921 SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4922 4923 const X86InstrInfo *TII = 4924 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 4925 4926 if (Subtarget->is64Bit()) { 4927 SDOperand OutChains[6]; 4928 4929 // Large code-model. 4930 4931 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 4932 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 4933 4934 const unsigned char N86R10 = 4935 ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 4936 const unsigned char N86R11 = 4937 ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 4938 4939 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 4940 4941 // Load the pointer to the nested function into R11. 4942 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 4943 SDOperand Addr = Trmp; 4944 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4945 TrmpSV->getValue(), TrmpSV->getOffset()); 4946 4947 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 4948 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpSV->getValue(), 4949 TrmpSV->getOffset() + 2, false, 2); 4950 4951 // Load the 'nest' parameter value into R10. 
4952 // R10 is specified in X86CallingConv.td 4953 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 4954 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 4955 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4956 TrmpSV->getValue(), TrmpSV->getOffset() + 10); 4957 4958 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 4959 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(), 4960 TrmpSV->getOffset() + 12, false, 2); 4961 4962 // Jump to the nested function. 4963 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 4964 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 4965 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4966 TrmpSV->getValue(), TrmpSV->getOffset() + 20); 4967 4968 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 4969 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 4970 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 4971 TrmpSV->getValue(), TrmpSV->getOffset() + 22); 4972 4973 SDOperand Ops[] = 4974 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 4975 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 4976 } else { 4977 const Function *Func = 4978 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 4979 unsigned CC = Func->getCallingConv(); 4980 unsigned NestReg; 4981 4982 switch (CC) { 4983 default: 4984 assert(0 && "Unsupported calling convention"); 4985 case CallingConv::C: 4986 case CallingConv::X86_StdCall: { 4987 // Pass 'nest' parameter in ECX. 4988 // Must be kept in sync with X86CallingConv.td 4989 NestReg = X86::ECX; 4990 4991 // Check that ECX wasn't needed by an 'inreg' parameter. 4992 const FunctionType *FTy = Func->getFunctionType(); 4993 const ParamAttrsList *Attrs = Func->getParamAttrs(); 4994 4995 if (Attrs && !Func->isVarArg()) { 4996 unsigned InRegCount = 0; 4997 unsigned Idx = 1; 4998 4999 for (FunctionType::param_iterator I = FTy->param_begin(), 5000 E = FTy->param_end(); I != E; ++I, ++Idx) 5001 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5002 // FIXME: should only count parameters that are lowered to integers. 5003 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5004 5005 if (InRegCount > 2) { 5006 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5007 abort(); 5008 } 5009 } 5010 break; 5011 } 5012 case CallingConv::X86_FastCall: 5013 // Pass 'nest' parameter in EAX. 
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDOperand OutChains[4];
    SDOperand Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);

    const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
    const unsigned char N86Reg =
      ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, TrmpSV->getValue(), TrmpSV->getOffset());

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
                                TrmpSV->getOffset() + 1, false, 1);

    const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                                TrmpSV->getValue(), TrmpSV->getOffset() + 5);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
                                TrmpSV->getOffset() + 6, false, 1);

    SDOperand Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
  }
}

SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
  /*
   The rounding mode is in bits 11:10 of the x87 FP control word (saved to
   memory below with FNSTCW), and has the following settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((CWD & 0x800) >> 11) | ((CWD & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameInfo &TFI = *TM.getFrameInfo();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT::ValueType VT = Op.getValueType();

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
                                DAG.getEntryNode(), StackSlot);

  // Load FP Control Word from stack slot
  SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);

  // Transform as necessary
  SDOperand CWD1 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDOperand CWD2 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDOperand RetVal =
    DAG.getNode(ISD::AND, MVT::i16,
                DAG.getNode(ISD::ADD, MVT::i16,
                            DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
5107 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5108} 5109 5110SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5111 MVT::ValueType VT = Op.getValueType(); 5112 MVT::ValueType OpVT = VT; 5113 unsigned NumBits = MVT::getSizeInBits(VT); 5114 5115 Op = Op.getOperand(0); 5116 if (VT == MVT::i8) { 5117 // Zero extend to i32 since there is not an i8 bsr. 5118 OpVT = MVT::i32; 5119 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5120 } 5121 5122 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5123 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5124 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5125 5126 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5127 SmallVector<SDOperand, 4> Ops; 5128 Ops.push_back(Op); 5129 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5130 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5131 Ops.push_back(Op.getValue(1)); 5132 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5133 5134 // Finally xor with NumBits-1. 5135 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5136 5137 if (VT == MVT::i8) 5138 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5139 return Op; 5140} 5141 5142SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5143 MVT::ValueType VT = Op.getValueType(); 5144 MVT::ValueType OpVT = VT; 5145 unsigned NumBits = MVT::getSizeInBits(VT); 5146 5147 Op = Op.getOperand(0); 5148 if (VT == MVT::i8) { 5149 OpVT = MVT::i32; 5150 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5151 } 5152 5153 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5154 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5155 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5156 5157 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5158 SmallVector<SDOperand, 4> Ops; 5159 Ops.push_back(Op); 5160 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5161 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5162 Ops.push_back(Op.getValue(1)); 5163 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5164 5165 if (VT == MVT::i8) 5166 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5167 return Op; 5168} 5169 5170/// LowerOperation - Provide custom lowering hooks for some operations. 
5171/// 5172SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5173 switch (Op.getOpcode()) { 5174 default: assert(0 && "Should not custom lower this!"); 5175 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5176 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5177 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5178 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5179 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5180 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5181 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5182 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5183 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5184 case ISD::SHL_PARTS: 5185 case ISD::SRA_PARTS: 5186 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5187 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5188 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5189 case ISD::FABS: return LowerFABS(Op, DAG); 5190 case ISD::FNEG: return LowerFNEG(Op, DAG); 5191 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5192 case ISD::SETCC: return LowerSETCC(Op, DAG); 5193 case ISD::SELECT: return LowerSELECT(Op, DAG); 5194 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5195 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5196 case ISD::CALL: return LowerCALL(Op, DAG); 5197 case ISD::RET: return LowerRET(Op, DAG); 5198 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5199 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5200 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5201 case ISD::VASTART: return LowerVASTART(Op, DAG); 5202 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5203 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5204 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5205 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5206 case ISD::FRAME_TO_ARGS_OFFSET: 5207 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5208 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5209 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5210 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5211 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5212 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5213 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5214 5215 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5216 case ISD::READCYCLECOUNTER: 5217 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5218 } 5219} 5220 5221/// ExpandOperation - Provide custom lowering hooks for expanding operations. 
5222SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5223 switch (N->getOpcode()) { 5224 default: assert(0 && "Should not custom lower this!"); 5225 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5226 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5227 } 5228} 5229 5230const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5231 switch (Opcode) { 5232 default: return NULL; 5233 case X86ISD::BSF: return "X86ISD::BSF"; 5234 case X86ISD::BSR: return "X86ISD::BSR"; 5235 case X86ISD::SHLD: return "X86ISD::SHLD"; 5236 case X86ISD::SHRD: return "X86ISD::SHRD"; 5237 case X86ISD::FAND: return "X86ISD::FAND"; 5238 case X86ISD::FOR: return "X86ISD::FOR"; 5239 case X86ISD::FXOR: return "X86ISD::FXOR"; 5240 case X86ISD::FSRL: return "X86ISD::FSRL"; 5241 case X86ISD::FILD: return "X86ISD::FILD"; 5242 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5243 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5244 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5245 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5246 case X86ISD::FLD: return "X86ISD::FLD"; 5247 case X86ISD::FST: return "X86ISD::FST"; 5248 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 5249 case X86ISD::FP_GET_RESULT2: return "X86ISD::FP_GET_RESULT2"; 5250 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 5251 case X86ISD::CALL: return "X86ISD::CALL"; 5252 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5253 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5254 case X86ISD::CMP: return "X86ISD::CMP"; 5255 case X86ISD::COMI: return "X86ISD::COMI"; 5256 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5257 case X86ISD::SETCC: return "X86ISD::SETCC"; 5258 case X86ISD::CMOV: return "X86ISD::CMOV"; 5259 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5260 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5261 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5262 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5263 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5264 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5265 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 5266 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5267 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5268 case X86ISD::FMAX: return "X86ISD::FMAX"; 5269 case X86ISD::FMIN: return "X86ISD::FMIN"; 5270 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5271 case X86ISD::FRCP: return "X86ISD::FRCP"; 5272 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5273 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5274 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5275 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5276 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5277 } 5278} 5279 5280// isLegalAddressingMode - Return true if the addressing mode represented 5281// by AM is legal for this target, for a load/store of the specified type. 5282bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5283 const Type *Ty) const { 5284 // X86 supports extremely general addressing modes. 5285 5286 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5287 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5288 return false; 5289 5290 if (AM.BaseGV) { 5291 // We can only fold this if we don't need an extra load. 
5292 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5293 return false; 5294 5295 // X86-64 only supports addr of globals in small code model. 5296 if (Subtarget->is64Bit()) { 5297 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5298 return false; 5299 // If lower 4G is not available, then we must use rip-relative addressing. 5300 if (AM.BaseOffs || AM.Scale > 1) 5301 return false; 5302 } 5303 } 5304 5305 switch (AM.Scale) { 5306 case 0: 5307 case 1: 5308 case 2: 5309 case 4: 5310 case 8: 5311 // These scales always work. 5312 break; 5313 case 3: 5314 case 5: 5315 case 9: 5316 // These scales are formed with basereg+scalereg. Only accept if there is 5317 // no basereg yet. 5318 if (AM.HasBaseReg) 5319 return false; 5320 break; 5321 default: // Other stuff never works. 5322 return false; 5323 } 5324 5325 return true; 5326} 5327 5328 5329bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5330 if (!Ty1->isInteger() || !Ty2->isInteger()) 5331 return false; 5332 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5333 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5334 if (NumBits1 <= NumBits2) 5335 return false; 5336 return Subtarget->is64Bit() || NumBits1 < 64; 5337} 5338 5339bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5340 MVT::ValueType VT2) const { 5341 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5342 return false; 5343 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5344 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5345 if (NumBits1 <= NumBits2) 5346 return false; 5347 return Subtarget->is64Bit() || NumBits1 < 64; 5348} 5349 5350/// isShuffleMaskLegal - Targets can use this to indicate that they only 5351/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5352/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5353/// are assumed to be legal. 5354bool 5355X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5356 // Only do shuffles on 128-bit vector types for now. 5357 if (MVT::getSizeInBits(VT) == 64) return false; 5358 return (Mask.Val->getNumOperands() <= 4 || 5359 isIdentityMask(Mask.Val) || 5360 isIdentityMask(Mask.Val, true) || 5361 isSplatMask(Mask.Val) || 5362 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5363 X86::isUNPCKLMask(Mask.Val) || 5364 X86::isUNPCKHMask(Mask.Val) || 5365 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5366 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5367} 5368 5369bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5370 MVT::ValueType EVT, 5371 SelectionDAG &DAG) const { 5372 unsigned NumElts = BVOps.size(); 5373 // Only do shuffles on 128-bit vector types for now. 
5374 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5375 if (NumElts == 2) return true; 5376 if (NumElts == 4) { 5377 return (isMOVLMask(&BVOps[0], 4) || 5378 isCommutedMOVL(&BVOps[0], 4, true) || 5379 isSHUFPMask(&BVOps[0], 4) || 5380 isCommutedSHUFP(&BVOps[0], 4)); 5381 } 5382 return false; 5383} 5384 5385//===----------------------------------------------------------------------===// 5386// X86 Scheduler Hooks 5387//===----------------------------------------------------------------------===// 5388 5389MachineBasicBlock * 5390X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5391 MachineBasicBlock *BB) { 5392 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5393 switch (MI->getOpcode()) { 5394 default: assert(false && "Unexpected instr type to insert"); 5395 case X86::CMOV_FR32: 5396 case X86::CMOV_FR64: 5397 case X86::CMOV_V4F32: 5398 case X86::CMOV_V2F64: 5399 case X86::CMOV_V2I64: { 5400 // To "insert" a SELECT_CC instruction, we actually have to insert the 5401 // diamond control-flow pattern. The incoming instruction knows the 5402 // destination vreg to set, the condition code register to branch on, the 5403 // true/false values to select between, and a branch opcode to use. 5404 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5405 ilist<MachineBasicBlock>::iterator It = BB; 5406 ++It; 5407 5408 // thisMBB: 5409 // ... 5410 // TrueVal = ... 5411 // cmpTY ccX, r1, r2 5412 // bCC copy1MBB 5413 // fallthrough --> copy0MBB 5414 MachineBasicBlock *thisMBB = BB; 5415 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5416 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5417 unsigned Opc = 5418 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5419 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5420 MachineFunction *F = BB->getParent(); 5421 F->getBasicBlockList().insert(It, copy0MBB); 5422 F->getBasicBlockList().insert(It, sinkMBB); 5423 // Update machine-CFG edges by first adding all successors of the current 5424 // block to the new block which will contain the Phi node for the select. 5425 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5426 e = BB->succ_end(); i != e; ++i) 5427 sinkMBB->addSuccessor(*i); 5428 // Next, remove all successors of the current block, and add the true 5429 // and fallthrough blocks as its successors. 5430 while(!BB->succ_empty()) 5431 BB->removeSuccessor(BB->succ_begin()); 5432 BB->addSuccessor(copy0MBB); 5433 BB->addSuccessor(sinkMBB); 5434 5435 // copy0MBB: 5436 // %FalseValue = ... 5437 // # fallthrough to sinkMBB 5438 BB = copy0MBB; 5439 5440 // Update machine-CFG edges 5441 BB->addSuccessor(sinkMBB); 5442 5443 // sinkMBB: 5444 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5445 // ... 5446 BB = sinkMBB; 5447 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5448 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5449 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5450 5451 delete MI; // The pseudo instruction is gone now. 5452 return BB; 5453 } 5454 5455 case X86::FP32_TO_INT16_IN_MEM: 5456 case X86::FP32_TO_INT32_IN_MEM: 5457 case X86::FP32_TO_INT64_IN_MEM: 5458 case X86::FP64_TO_INT16_IN_MEM: 5459 case X86::FP64_TO_INT32_IN_MEM: 5460 case X86::FP64_TO_INT64_IN_MEM: 5461 case X86::FP80_TO_INT16_IN_MEM: 5462 case X86::FP80_TO_INT32_IN_MEM: 5463 case X86::FP80_TO_INT64_IN_MEM: { 5464 // Change the floating point control register to use "round towards zero" 5465 // mode when truncating to an integer value. 
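    // Roughly the following sequence is emitted (a sketch; registers and the
    // exact store opcode vary with the operands):
    //   fnstcw  [slot]             ; save the current FP control word
    //   mov     ax, [slot]         ; remember the old value
    //   mov     word [slot], 0xC7F ; RC bits = 11b, i.e. round toward zero
    //   fldcw   [slot]             ; activate truncation mode
    //   mov     [slot], ax         ; put the old value back in the slot
    //   fistp   [dest]             ; convert using the truncating mode
    //   fldcw   [slot]             ; restore the original control word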
5466 MachineFunction *F = BB->getParent(); 5467 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5468 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5469 5470 // Load the old value of the high byte of the control word... 5471 unsigned OldCW = 5472 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5473 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5474 5475 // Set the high part to be round to zero... 5476 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5477 .addImm(0xC7F); 5478 5479 // Reload the modified control word now... 5480 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5481 5482 // Restore the memory image of control word to original value 5483 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5484 .addReg(OldCW); 5485 5486 // Get the X86 opcode to use. 5487 unsigned Opc; 5488 switch (MI->getOpcode()) { 5489 default: assert(0 && "illegal opcode!"); 5490 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5491 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5492 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5493 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5494 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5495 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5496 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5497 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5498 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5499 } 5500 5501 X86AddressMode AM; 5502 MachineOperand &Op = MI->getOperand(0); 5503 if (Op.isRegister()) { 5504 AM.BaseType = X86AddressMode::RegBase; 5505 AM.Base.Reg = Op.getReg(); 5506 } else { 5507 AM.BaseType = X86AddressMode::FrameIndexBase; 5508 AM.Base.FrameIndex = Op.getIndex(); 5509 } 5510 Op = MI->getOperand(1); 5511 if (Op.isImmediate()) 5512 AM.Scale = Op.getImm(); 5513 Op = MI->getOperand(2); 5514 if (Op.isImmediate()) 5515 AM.IndexReg = Op.getImm(); 5516 Op = MI->getOperand(3); 5517 if (Op.isGlobalAddress()) { 5518 AM.GV = Op.getGlobal(); 5519 } else { 5520 AM.Disp = Op.getImm(); 5521 } 5522 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5523 .addReg(MI->getOperand(4).getReg()); 5524 5525 // Reload the original control word now. 5526 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5527 5528 delete MI; // The pseudo instruction is gone now. 5529 return BB; 5530 } 5531 } 5532} 5533 5534//===----------------------------------------------------------------------===// 5535// X86 Optimization Hooks 5536//===----------------------------------------------------------------------===// 5537 5538void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5539 uint64_t Mask, 5540 uint64_t &KnownZero, 5541 uint64_t &KnownOne, 5542 const SelectionDAG &DAG, 5543 unsigned Depth) const { 5544 unsigned Opc = Op.getOpcode(); 5545 assert((Opc >= ISD::BUILTIN_OP_END || 5546 Opc == ISD::INTRINSIC_WO_CHAIN || 5547 Opc == ISD::INTRINSIC_W_CHAIN || 5548 Opc == ISD::INTRINSIC_VOID) && 5549 "Should use MaskedValueIsZero if you don't know whether Op" 5550 " is a target node!"); 5551 5552 KnownZero = KnownOne = 0; // Don't know anything. 
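  // For example, X86ISD::SETCC always produces 0 or 1, so every bit above
  // bit 0 of its result is known zero; the switch below records exactly that.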
5553 switch (Opc) { 5554 default: break; 5555 case X86ISD::SETCC: 5556 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5557 break; 5558 } 5559} 5560 5561/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5562/// element of the result of the vector shuffle. 5563static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5564 MVT::ValueType VT = N->getValueType(0); 5565 SDOperand PermMask = N->getOperand(2); 5566 unsigned NumElems = PermMask.getNumOperands(); 5567 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5568 i %= NumElems; 5569 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5570 return (i == 0) 5571 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5572 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5573 SDOperand Idx = PermMask.getOperand(i); 5574 if (Idx.getOpcode() == ISD::UNDEF) 5575 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5576 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5577 } 5578 return SDOperand(); 5579} 5580 5581/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5582/// node is a GlobalAddress + an offset. 5583static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5584 unsigned Opc = N->getOpcode(); 5585 if (Opc == X86ISD::Wrapper) { 5586 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5587 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5588 return true; 5589 } 5590 } else if (Opc == ISD::ADD) { 5591 SDOperand N1 = N->getOperand(0); 5592 SDOperand N2 = N->getOperand(1); 5593 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5594 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5595 if (V) { 5596 Offset += V->getSignExtended(); 5597 return true; 5598 } 5599 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5600 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5601 if (V) { 5602 Offset += V->getSignExtended(); 5603 return true; 5604 } 5605 } 5606 } 5607 return false; 5608} 5609 5610/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5611/// + Dist * Size. 5612static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5613 MachineFrameInfo *MFI) { 5614 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5615 return false; 5616 5617 SDOperand Loc = N->getOperand(1); 5618 SDOperand BaseLoc = Base->getOperand(1); 5619 if (Loc.getOpcode() == ISD::FrameIndex) { 5620 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5621 return false; 5622 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5623 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5624 int FS = MFI->getObjectSize(FI); 5625 int BFS = MFI->getObjectSize(BFI); 5626 if (FS != BFS || FS != Size) return false; 5627 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5628 } else { 5629 GlobalValue *GV1 = NULL; 5630 GlobalValue *GV2 = NULL; 5631 int64_t Offset1 = 0; 5632 int64_t Offset2 = 0; 5633 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5634 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5635 if (isGA1 && isGA2 && GV1 == GV2) 5636 return Offset1 == (Offset2 + Dist*Size); 5637 } 5638 5639 return false; 5640} 5641 5642static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5643 const X86Subtarget *Subtarget) { 5644 GlobalValue *GV; 5645 int64_t Offset; 5646 if (isGAPlusOffset(Base, GV, Offset)) 5647 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5648 // DAG combine handles the stack object case. 
5649 return false; 5650} 5651 5652 5653/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5654/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5655/// if the load addresses are consecutive, non-overlapping, and in the right 5656/// order. 5657static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5658 const X86Subtarget *Subtarget) { 5659 MachineFunction &MF = DAG.getMachineFunction(); 5660 MachineFrameInfo *MFI = MF.getFrameInfo(); 5661 MVT::ValueType VT = N->getValueType(0); 5662 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5663 SDOperand PermMask = N->getOperand(2); 5664 int NumElems = (int)PermMask.getNumOperands(); 5665 SDNode *Base = NULL; 5666 for (int i = 0; i < NumElems; ++i) { 5667 SDOperand Idx = PermMask.getOperand(i); 5668 if (Idx.getOpcode() == ISD::UNDEF) { 5669 if (!Base) return SDOperand(); 5670 } else { 5671 SDOperand Arg = 5672 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5673 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5674 return SDOperand(); 5675 if (!Base) 5676 Base = Arg.Val; 5677 else if (!isConsecutiveLoad(Arg.Val, Base, 5678 i, MVT::getSizeInBits(EVT)/8,MFI)) 5679 return SDOperand(); 5680 } 5681 } 5682 5683 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5684 LoadSDNode *LD = cast<LoadSDNode>(Base); 5685 if (isAlign16) { 5686 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5687 LD->getSrcValueOffset(), LD->isVolatile()); 5688 } else { 5689 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5690 LD->getSrcValueOffset(), LD->isVolatile(), 5691 LD->getAlignment()); 5692 } 5693} 5694 5695/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5696static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5697 const X86Subtarget *Subtarget) { 5698 SDOperand Cond = N->getOperand(0); 5699 5700 // If we have SSE[12] support, try to form min/max nodes. 5701 if (Subtarget->hasSSE2() && 5702 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5703 if (Cond.getOpcode() == ISD::SETCC) { 5704 // Get the LHS/RHS of the select. 5705 SDOperand LHS = N->getOperand(1); 5706 SDOperand RHS = N->getOperand(2); 5707 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5708 5709 unsigned Opcode = 0; 5710 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5711 switch (CC) { 5712 default: break; 5713 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5714 case ISD::SETULE: 5715 case ISD::SETLE: 5716 if (!UnsafeFPMath) break; 5717 // FALL THROUGH. 5718 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5719 case ISD::SETLT: 5720 Opcode = X86ISD::FMIN; 5721 break; 5722 5723 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5724 case ISD::SETUGT: 5725 case ISD::SETGT: 5726 if (!UnsafeFPMath) break; 5727 // FALL THROUGH. 5728 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5729 case ISD::SETGE: 5730 Opcode = X86ISD::FMAX; 5731 break; 5732 } 5733 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5734 switch (CC) { 5735 default: break; 5736 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5737 case ISD::SETUGT: 5738 case ISD::SETGT: 5739 if (!UnsafeFPMath) break; 5740 // FALL THROUGH. 5741 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5742 case ISD::SETGE: 5743 Opcode = X86ISD::FMIN; 5744 break; 5745 5746 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5747 case ISD::SETULE: 5748 case ISD::SETLE: 5749 if (!UnsafeFPMath) break; 5750 // FALL THROUGH. 
5751 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 5752 case ISD::SETLT: 5753 Opcode = X86ISD::FMAX; 5754 break; 5755 } 5756 } 5757 5758 if (Opcode) 5759 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5760 } 5761 5762 } 5763 5764 return SDOperand(); 5765} 5766 5767/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 5768/// X86ISD::FXOR nodes. 5769static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 5770 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 5771 // F[X]OR(0.0, x) -> x 5772 // F[X]OR(x, 0.0) -> x 5773 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5774 if (C->getValueAPF().isPosZero()) 5775 return N->getOperand(1); 5776 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5777 if (C->getValueAPF().isPosZero()) 5778 return N->getOperand(0); 5779 return SDOperand(); 5780} 5781 5782/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 5783static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 5784 // FAND(0.0, x) -> 0.0 5785 // FAND(x, 0.0) -> 0.0 5786 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5787 if (C->getValueAPF().isPosZero()) 5788 return N->getOperand(0); 5789 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5790 if (C->getValueAPF().isPosZero()) 5791 return N->getOperand(1); 5792 return SDOperand(); 5793} 5794 5795 5796SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5797 DAGCombinerInfo &DCI) const { 5798 SelectionDAG &DAG = DCI.DAG; 5799 switch (N->getOpcode()) { 5800 default: break; 5801 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget); 5802 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 5803 case X86ISD::FXOR: 5804 case X86ISD::FOR: return PerformFORCombine(N, DAG); 5805 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 5806 } 5807 5808 return SDOperand(); 5809} 5810 5811//===----------------------------------------------------------------------===// 5812// X86 Inline Assembly Support 5813//===----------------------------------------------------------------------===// 5814 5815/// getConstraintType - Given a constraint letter, return the type of 5816/// constraint it is for this target. 5817X86TargetLowering::ConstraintType 5818X86TargetLowering::getConstraintType(const std::string &Constraint) const { 5819 if (Constraint.size() == 1) { 5820 switch (Constraint[0]) { 5821 case 'A': 5822 case 'r': 5823 case 'R': 5824 case 'l': 5825 case 'q': 5826 case 'Q': 5827 case 'x': 5828 case 'Y': 5829 return C_RegisterClass; 5830 default: 5831 break; 5832 } 5833 } 5834 return TargetLowering::getConstraintType(Constraint); 5835} 5836 5837/// LowerXConstraint - try to replace an X constraint, which matches anything, 5838/// with another that has more specific requirements based on the type of the 5839/// corresponding operand. 5840void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT, 5841 std::string& s) const { 5842 if (MVT::isFloatingPoint(ConstraintVT)) { 5843 if (Subtarget->hasSSE2()) 5844 s = "Y"; 5845 else if (Subtarget->hasSSE1()) 5846 s = "x"; 5847 else 5848 s = "f"; 5849 } else 5850 return TargetLowering::lowerXConstraint(ConstraintVT, s); 5851} 5852 5853/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5854/// vector. If it is invalid, don't add anything to Ops. 
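/// For example, following the GCC x86 constraint letters, 'I' only accepts
/// immediates in [0, 31] (e.g. 32-bit shift counts) and 'N' only accepts
/// immediates in [0, 255] (e.g. port numbers for in/out).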
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand>&Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA), (GA+C) or (C+GA).
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the operands the other way around.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we
      // can't match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
5939 switch (Constraint[0]) { // GCC X86 Constraint Letters 5940 default: break; // Unknown constraint letter 5941 case 'A': // EAX/EDX 5942 if (VT == MVT::i32 || VT == MVT::i64) 5943 return make_vector<unsigned>(X86::EAX, X86::EDX, 0); 5944 break; 5945 case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode) 5946 case 'Q': // Q_REGS 5947 if (VT == MVT::i32) 5948 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0); 5949 else if (VT == MVT::i16) 5950 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0); 5951 else if (VT == MVT::i8) 5952 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0); 5953 else if (VT == MVT::i64) 5954 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0); 5955 break; 5956 } 5957 } 5958 5959 return std::vector<unsigned>(); 5960} 5961 5962std::pair<unsigned, const TargetRegisterClass*> 5963X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5964 MVT::ValueType VT) const { 5965 // First, see if this is a constraint that directly corresponds to an LLVM 5966 // register class. 5967 if (Constraint.size() == 1) { 5968 // GCC Constraint Letters 5969 switch (Constraint[0]) { 5970 default: break; 5971 case 'r': // GENERAL_REGS 5972 case 'R': // LEGACY_REGS 5973 case 'l': // INDEX_REGS 5974 if (VT == MVT::i64 && Subtarget->is64Bit()) 5975 return std::make_pair(0U, X86::GR64RegisterClass); 5976 if (VT == MVT::i32) 5977 return std::make_pair(0U, X86::GR32RegisterClass); 5978 else if (VT == MVT::i16) 5979 return std::make_pair(0U, X86::GR16RegisterClass); 5980 else if (VT == MVT::i8) 5981 return std::make_pair(0U, X86::GR8RegisterClass); 5982 break; 5983 case 'y': // MMX_REGS if MMX allowed. 5984 if (!Subtarget->hasMMX()) break; 5985 return std::make_pair(0U, X86::VR64RegisterClass); 5986 break; 5987 case 'Y': // SSE_REGS if SSE2 allowed 5988 if (!Subtarget->hasSSE2()) break; 5989 // FALL THROUGH. 5990 case 'x': // SSE_REGS if SSE1 allowed 5991 if (!Subtarget->hasSSE1()) break; 5992 5993 switch (VT) { 5994 default: break; 5995 // Scalar SSE types. 5996 case MVT::f32: 5997 case MVT::i32: 5998 return std::make_pair(0U, X86::FR32RegisterClass); 5999 case MVT::f64: 6000 case MVT::i64: 6001 return std::make_pair(0U, X86::FR64RegisterClass); 6002 // Vector types. 6003 case MVT::v16i8: 6004 case MVT::v8i16: 6005 case MVT::v4i32: 6006 case MVT::v2i64: 6007 case MVT::v4f32: 6008 case MVT::v2f64: 6009 return std::make_pair(0U, X86::VR128RegisterClass); 6010 } 6011 break; 6012 } 6013 } 6014 6015 // Use the default implementation in TargetLowering to convert the register 6016 // constraint into a member of a register class. 6017 std::pair<unsigned, const TargetRegisterClass*> Res; 6018 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6019 6020 // Not found as a standard register? 6021 if (Res.second == 0) { 6022 // GCC calls "st(0)" just plain "st". 6023 if (StringsEqualNoCase("{st}", Constraint)) { 6024 Res.first = X86::ST0; 6025 Res.second = X86::RFP80RegisterClass; 6026 } 6027 6028 return Res; 6029 } 6030 6031 // Otherwise, check to see if this is a register class of the wrong value 6032 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 6033 // turn into {ax},{dx}. 6034 if (Res.second->hasType(VT)) 6035 return Res; // Correct type already, nothing to do. 6036 6037 // All of the single-register GCC register classes map their values onto 6038 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". 
If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}