X86ISelLowering.cpp revision 4fdad172deda12e10a6cf9b5c54cf9346cfaef00
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
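  // (So a truncating i32->i8 store, for instance, is rewritten by the
  // legalizer into an explicit TRUNCATE followed by a plain i8 store.)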
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
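  // (With SSE2, an i32<->f32 BIT_CONVERT could select to a single movd and an
  // i64<->f64 one to movq; without SSE, the Expand entries below send the
  // value through a stack slot instead.)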
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  // Scalar integer multiply, multiply-high, divide, and remainder are
  // lowered to use operations that produce two results, to match the
  // available instructions. This exposes the two-result form to trivial
  // CSE, which is able to combine x/y and x%y into a single instruction,
  // for example. The single-result multiply instructions are introduced
  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part is
  // not needed.
  setOperationAction(ISD::MUL              , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MUL              , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MUL              , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MUL              , MVT::i64  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
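    // (The x87 unit rounds when it stores, so e.g. an f80->f64 truncation is
    // a store of the value as f64 followed by a reload; the Expand entries
    // below make the legalizer emit exactly that store/load pair.)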
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN         , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS         , MVT::f64  , Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN         , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS         , MVT::f64  , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
    setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
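  // (Expand on a vector op, e.g. ISD::ADD on v4i32, makes the legalizer
  // unroll it into scalar adds; the MMX/SSE blocks below then re-mark the
  // operations each subtarget really supports as Legal or Custom.)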
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
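  // (A threshold of 16 means, for example, that a 64-byte memset may be
  // inlined as sixteen 4-byte stores, or fewer wider ones, instead of
  // becoming a call to the memset library routine.)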
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
    else if (VTy->getBitWidth() == 64)
      if (MaxAlign < 8)
        MaxAlign = 8;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node,
/// if it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
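  // (Registers added here, e.g. EAX for an i32 return, become live-out of the
  // function, which keeps the copies into them alive until the RET is emitted.)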
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand,8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't really
    // a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP value in an SSE register, we need to move it from
    // an XMM register onto the fp-stack. Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
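  // (RetCC_X86 is the tablegen'd rule set included from X86GenCallingConv.inc
  // above; it assigns e.g. an i32 result to EAX, while x87 FP results land in
  // ST0, the special case handled below.)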
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);

    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for the StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64 ISD::CALL
/// where the results are known to be in ST0 and ST1.
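/// (This matches the X86::InX87 case classified below: a struct of two
/// x86_fp80 fields, e.g. a complex long double, comes back on the x87 stack.)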
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT2, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention seems to be the standard for many Windows
//  API routines. It differs from the C calling convention just a little: the
//  callee should clean up the stack, not the caller. Symbols should also be
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on fast calling convention see Fast Calling Convention (tail call)
//  implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live-in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

// Determines whether a CALL node uses struct return semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a FORMAL_ARGUMENTS node uses struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a CALL or FORMAL_ARGUMENTS node requires the callee to pop
// its own arguments. Callee pop is necessary to support tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

// Selects the correct CCAssignFn for a CALL or FORMAL_ARGUMENTS node.
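// (The calling convention plus PerformTailCallOpt select one of the
// tablegen'd tables from X86GenCallingConv.inc; the 64-bit query is resolved
// first, so e.g. fastcc with tail call optimization enabled on x86-64 picks
// CC_X86_64_TailCall.)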
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit())
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

// Selects the appropriate decoration to apply to a MachineFunction containing a
// given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}


// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could possibly
// be overwritten when lowering the outgoing arguments in a tail call. Currently
// the implementation of this call is very conservative and assumes all
// arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with virtual
// registers would be overwritten by direct lowering.
// Possible improvement:
// Check the MERGE_VALUES corresponding to FORMAL_ARGUMENTS for CopyFromReg
// nodes indicating inreg-passed arguments, which also need not be lowered to a
// safe stack slot.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op) {
  RegisterSDNode *OpReg = NULL;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = cast<RegisterSDNode>(Op.getOperand(1))) &&
       OpReg->getReg() >= MRegisterInfo::FirstVirtualRegister))
    return true;
  return false;
}

// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
// by "Src" to address "Dst" with size and alignment information specified by
// the specific parameter attribute. The copy will be passed as a byval function
// parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          unsigned Flags, SelectionDAG &DAG) {
  unsigned Align = 1 <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool isByVal = Flags & ISD::ParamFlags::ByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This
  // can be changed with more analysis.
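  // (An immutable fixed object would let later passes assume the byval memory
  // is never modified, so loads from it could be freely reordered or folded;
  // mutable is the conservative default until such analysis exists.)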
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), !isByVal);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     &PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
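      // (A v2i32 argument, for instance, arrived above as a raw i64 in a
      // 64-bit GPR; the BIT_CONVERT reinterprets those 64 bits as the
      // original vector type.)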
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       &PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       &PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are
  // aligned.
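  // (Worked example: 20 bytes of arguments stay at 20, since 20 & 7 != 0,
  // but 24 bytes become 28, so that pushing the 4-byte return address brings
  // the total back to a multiple of 8.)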
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn  = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;   // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff,
                      &PseudoSourceValue::getStack(), LocMemOffset);
}

/// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64
/// struct return call to the specified function. The X86-64 ABI specifies
/// that some SRet calls are actually returned in registers. Since current
/// LLVM cannot represent multi-value calls, they are represented as
/// calls where the results are passed in a hidden struct provided by
/// the caller. This function examines the type of the struct to
/// determine the correct way to implement the call.
X86::X86_64SRet
X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
  // FIXME: Disabled for now.
1315  return X86::InMemory;
1316
1317  const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType());
1318  const Type *RTy = PTy->getElementType();
1319  unsigned Size = getTargetData()->getABITypeSize(RTy);
1320  if (Size != 16 && Size != 32)
1321    return X86::InMemory;
1322
1323  if (Size == 32) {
1324    const StructType *STy = dyn_cast<StructType>(RTy);
1325    if (!STy) return X86::InMemory;
1326    if (STy->getNumElements() == 2 &&
1327        STy->getElementType(0) == Type::X86_FP80Ty &&
1328        STy->getElementType(1) == Type::X86_FP80Ty)
1329      return X86::InX87;
1330  }
1331
1332  bool AllFP = true;
1333  for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end();
1334       I != E; ++I) {
1335    const Type *STy = I->get();
1336    if (!STy->isFPOrFPVector()) {
1337      AllFP = false;
1338      break;
1339    }
1340  }
1341
1342  if (AllFP)
1343    return X86::InSSE;
1344  return X86::InGPR64;
1345}
1346
1347void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall,
1348                                                      CCAssignFn *Fn,
1349                                                      CCState &CCInfo) {
1350  unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
1351  for (unsigned i = 1; i != NumOps; ++i) {
1352    MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType();
1353    SDOperand FlagOp = TheCall->getOperand(5+2*i+1);
1354    unsigned ArgFlags = cast<ConstantSDNode>(FlagOp)->getValue();
1355    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) {
1356      cerr << "Call operand #" << i << " has unhandled type "
1357           << MVT::getValueTypeString(ArgVT) << "\n";
1358      abort();
1359    }
1360  }
1361}
1362
1363SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
1364  MachineFunction &MF = DAG.getMachineFunction();
1365  SDOperand Chain = Op.getOperand(0);
1366  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1367  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1368  bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
1369                    && CC == CallingConv::Fast && PerformTailCallOpt;
1370  SDOperand Callee = Op.getOperand(4);
1371  bool Is64Bit = Subtarget->is64Bit();
1372  bool IsStructRet = CallIsStructReturn(Op);
1373
1374  assert(!(isVarArg && CC == CallingConv::Fast) &&
1375         "Var args not supported with calling convention fastcc");
1376
1377  // Analyze operands of the call, assigning locations to each operand.
1378  SmallVector<CCValAssign, 16> ArgLocs;
1379  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1380  CCAssignFn *CCFn = CCAssignFnForNode(Op);
1381
1382  X86::X86_64SRet SRetMethod = X86::InMemory;
1383  if (Is64Bit && IsStructRet)
1384    // FIXME: We can't figure out the type of the sret structure for indirect
1385    // calls. We need to copy more information from CallSite to the ISD::CALL
1386    // node.
1387    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1388      SRetMethod =
1389        ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal()));
1390
1391  // UGLY HACK! For x86-64, some 128-bit aggregates are returned in a pair of
1392  // registers. Unfortunately, LLVM does not support i128 yet, so we pretend
1393  // it's an sret call.
1394  if (SRetMethod != X86::InMemory)
1395    X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo);
1396  else
1397    CCInfo.AnalyzeCallOperands(Op.Val, CCFn);
1398
1399  // Get a count of how many bytes are to be pushed on the stack.
1400  unsigned NumBytes = CCInfo.getNextStackOffset();
1401  if (CC == CallingConv::Fast)
1402    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
1403
1404  // Make sure the argument area occupies 8n+4 bytes so that both the start of
1405  // the arguments and the arguments after the pushed return address are
1406  // aligned.
1406  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
1407      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
1408      (NumBytes & 7) == 0)
1409    NumBytes += 4;
1410
1411  int FPDiff = 0;
1412  if (IsTailCall) {
1413    // Lower arguments at fp - stackoffset + fpdiff.
1414    unsigned NumBytesCallerPushed =
1415      MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1416    FPDiff = NumBytesCallerPushed - NumBytes;
1417
1418    // Record the delta by which the return address stack slot moves, but only
1419    // if this delta is smaller than any previously recorded one.
1420    if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
1421      MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
1422  }
1423
1424  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));
1425
1426  SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
1427  if (IsTailCall) {
1428    // Adjust the return address stack slot.
1429    if (FPDiff) {
1430      MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
1431      RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
1432      // Load the "old" return address.
1433      RetAddrFrIdx =
1434        DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
1435      // Calculate the new stack slot for the return address.
1436      int SlotSize = Is64Bit ? 8 : 4;
1437      int NewReturnAddrFI =
1438        MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
1439      NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1440      Chain = SDOperand(RetAddrFrIdx.Val, 1);
1441    }
1442  }
1443
1444  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1445  SmallVector<SDOperand, 8> MemOpChains;
1446
1447  SDOperand StackPtr;
1448
1449  // Walk the register/memloc assignments, inserting copies/loads. For tail
1450  // calls, arguments that might otherwise be overwritten are lowered to the
1451  // stack slots where they would go on a normal function call.
1452  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1453    CCValAssign &VA = ArgLocs[i];
1454    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1455
1456    // Promote the value if needed.
1457    switch (VA.getLocInfo()) {
1458    default: assert(0 && "Unknown loc info!");
1459    case CCValAssign::Full: break;
1460    case CCValAssign::SExt:
1461      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1462      break;
1463    case CCValAssign::ZExt:
1464      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1465      break;
1466    case CCValAssign::AExt:
1467      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1468      break;
1469    }
1470
1471    if (VA.isRegLoc()) {
1472      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1473    } else {
1474      if (!IsTailCall || IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
1475        assert(VA.isMemLoc());
1476        if (StackPtr.Val == 0)
1477          StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1478
1479        MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1480                                               Arg));
1481      }
1482    }
1483  }
1484
1485  if (!MemOpChains.empty())
1486    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1487                        &MemOpChains[0], MemOpChains.size());
1488
1489  // Build a sequence of copy-to-reg nodes chained together with token chain
1490  // and flag operands which copy the outgoing args into registers.
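  // The copies are glued together: each CopyToReg consumes the previous
  // node's flag result, e.g. (node and register names illustrative):
  //   t1: ch,flag = CopyToReg t0, %ecx, Arg0
  //   t2: ch,flag = CopyToReg t1, %edx, Arg1, t1:1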
1491  SDOperand InFlag;
1492  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1493    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1494                             InFlag);
1495    InFlag = Chain.getValue(1);
1496  }
1497
1498  if (IsTailCall)
1499    InFlag = SDOperand(); // ??? Isn't this nuking the preceding loop's output?
1500
1501  // ELF / PIC requires the GOT pointer to be live in the EBX register before
1502  // function calls made via the PLT.
1503  // This does not work with tail calls, since EBX is not restored correctly
1504  // by the tail caller. TODO: at least for x86 - verify for x86-64.
1505  if (!IsTailCall && !Is64Bit &&
1506      getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1507      Subtarget->isPICStyleGOT()) {
1508    Chain = DAG.getCopyToReg(Chain, X86::EBX,
1509                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1510                             InFlag);
1511    InFlag = Chain.getValue(1);
1512  }
1513
1514  if (Is64Bit && isVarArg) {
1515    // From the AMD64 ABI document:
1516    // For calls that may call functions that use varargs or stdargs
1517    // (prototype-less calls or calls to functions containing ellipsis (...) in
1518    // the declaration) %al is used as hidden argument to specify the number
1519    // of SSE registers used. The contents of %al do not need to match exactly
1520    // the number of registers, but must be an upper bound on the number of SSE
1521    // registers used and is in the range 0 - 8 inclusive.
1522
1523    // Count the number of XMM registers allocated.
1524    static const unsigned XMMArgRegs[] = {
1525      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1526      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1527    };
1528    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
1529
1530    Chain = DAG.getCopyToReg(Chain, X86::AL,
1531                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1532    InFlag = Chain.getValue(1);
1533  }
1534
1535  // For tail calls lower the arguments to the 'real' stack slots.
1536  if (IsTailCall) {
1537    SmallVector<SDOperand, 8> MemOpChains2;
1538    SDOperand FIN;
1539    int FI = 0;
1540    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1541      CCValAssign &VA = ArgLocs[i];
1542      if (!VA.isRegLoc()) {
1543        assert(VA.isMemLoc());
1544        SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1545        SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1546        unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
1547        // Create frame index.
1548        int32_t Offset = VA.getLocMemOffset()+FPDiff;
1549        uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
1550        FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
1551        FIN = DAG.getFrameIndex(FI, MVT::i32);
1552        SDOperand Source = Arg;
1553        if (IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
1554          // Copy from stack slots to the stack slot of a tail called function.
1555          // This is necessary because if we lowered the arguments directly to
1556          // their real stack slots, they might end up overwriting each other.
1557          // Get the source stack slot.
1558          Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
1559          if (StackPtr.Val == 0)
1560            StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1561          Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source);
1562          if ((Flags & ISD::ParamFlags::ByVal) == 0)
1563            Source = DAG.getLoad(VA.getValVT(), Chain, Source, NULL, 0);
1564        }
1565
1566        if (Flags & ISD::ParamFlags::ByVal) {
1567          // Copy relative to framepointer.
1568          MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
1569                                                           Flags, DAG));
1570        } else {
1571          // Store relative to framepointer.
1572 MemOpChains2.push_back( 1573 DAG.getStore(Chain, Source, FIN, 1574 &PseudoSourceValue::getFixedStack(), FI)); 1575 } 1576 } 1577 } 1578 1579 if (!MemOpChains2.empty()) 1580 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1581 &MemOpChains2[0], MemOpChains2.size()); 1582 1583 // Store the return address to the appropriate stack slot. 1584 if (FPDiff) 1585 Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0); 1586 } 1587 1588 // If the callee is a GlobalAddress node (quite common, every direct call is) 1589 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1590 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1591 // We should use extra load for direct calls to dllimported functions in 1592 // non-JIT mode. 1593 if ((IsTailCall || !Is64Bit || 1594 getTargetMachine().getCodeModel() != CodeModel::Large) 1595 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1596 getTargetMachine(), true)) 1597 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1598 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1599 if (IsTailCall || !Is64Bit || 1600 getTargetMachine().getCodeModel() != CodeModel::Large) 1601 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1602 } else if (IsTailCall) { 1603 assert(Callee.getOpcode() == ISD::LOAD && 1604 "Function destination must be loaded into virtual register"); 1605 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1606 1607 Chain = DAG.getCopyToReg(Chain, 1608 DAG.getRegister(Opc, getPointerTy()) , 1609 Callee,InFlag); 1610 Callee = DAG.getRegister(Opc, getPointerTy()); 1611 // Add register as live out. 1612 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1613 } 1614 1615 // Returns a chain & a flag for retval copy to use. 1616 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1617 SmallVector<SDOperand, 8> Ops; 1618 1619 if (IsTailCall) { 1620 Ops.push_back(Chain); 1621 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1622 Ops.push_back(DAG.getIntPtrConstant(0)); 1623 if (InFlag.Val) 1624 Ops.push_back(InFlag); 1625 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1626 InFlag = Chain.getValue(1); 1627 1628 // Returns a chain & a flag for retval copy to use. 1629 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1630 Ops.clear(); 1631 } 1632 1633 Ops.push_back(Chain); 1634 Ops.push_back(Callee); 1635 1636 if (IsTailCall) 1637 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1638 1639 // Add an implicit use GOT pointer in EBX. 1640 if (!IsTailCall && !Is64Bit && 1641 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1642 Subtarget->isPICStyleGOT()) 1643 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1644 1645 // Add argument registers to the end of the list so that they are known live 1646 // into the call. 1647 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1648 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1649 RegsToPass[i].second.getValueType())); 1650 1651 if (InFlag.Val) 1652 Ops.push_back(InFlag); 1653 1654 if (IsTailCall) { 1655 assert(InFlag.Val && 1656 "Flag must be set. Depend on flag being set in LowerRET"); 1657 Chain = DAG.getNode(X86ISD::TAILCALL, 1658 Op.Val->getVTList(), &Ops[0], Ops.size()); 1659 1660 return SDOperand(Chain.Val, Op.ResNo); 1661 } 1662 1663 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1664 InFlag = Chain.getValue(1); 1665 1666 // Create the CALLSEQ_END node. 
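  // e.g. with a callee-pop convention (stdcall/fastcall on x86) the callee
  // pops all NumBytes itself; a 32-bit sret call pops only the 4-byte hidden
  // struct pointer; otherwise the callee pops nothing.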
1667  unsigned NumBytesForCalleeToPush;
1668  if (IsCalleePop(Op))
1669    NumBytesForCalleeToPush = NumBytes;  // Callee pops everything.
1670  else if (!Is64Bit && IsStructRet)
1671    // If this is a call to a struct-return function, the callee
1672    // pops the hidden struct pointer, so we have to push it back.
1673    // This is common for Darwin/X86, Linux & Mingw32 targets.
1674    NumBytesForCalleeToPush = 4;
1675  else
1676    NumBytesForCalleeToPush = 0;  // Callee pops nothing.
1677
1678  // Returns a flag for retval copy to use.
1679  Chain = DAG.getCALLSEQ_END(Chain,
1680                             DAG.getIntPtrConstant(NumBytes),
1681                             DAG.getIntPtrConstant(NumBytesForCalleeToPush),
1682                             InFlag);
1683  InFlag = Chain.getValue(1);
1684
1685  // Handle result values, copying them out of physregs into vregs that we
1686  // return.
1687  switch (SRetMethod) {
1688  default:
1689    return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1690  case X86::InGPR64:
1691    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1692                                                   X86::RAX, X86::RDX,
1693                                                   MVT::i64, DAG), Op.ResNo);
1694  case X86::InSSE:
1695    return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1696                                                   X86::XMM0, X86::XMM1,
1697                                                   MVT::f64, DAG), Op.ResNo);
1698  case X86::InX87:
1699    return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
1700                     Op.ResNo);
1701  }
1702}
1703
1704
1705//===----------------------------------------------------------------------===//
1706//                Fast Calling Convention (tail call) implementation
1707//===----------------------------------------------------------------------===//
1708
1709// Like a stdcall convention (the callee cleans up the arguments), except that
1710// ECX is reserved for storing the tail called function address. Only 2
1711// registers are free for argument passing (inreg). Tail call optimization is
1712// performed provided:
1713// * tailcallopt is enabled
1714// * caller/callee are fastcc
1715// * elf/pic is disabled OR
1716// * elf/pic enabled + callee is in module + callee has
1717//   visibility protected or hidden
1718// To keep the stack aligned according to the platform ABI, the function
1719// GetAlignedArgumentStackSize ensures that the argument delta is always a
1720// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
1721// for example.) If a tail called function callee has more arguments than the
1722// caller, the caller needs to make sure that there is room to move the RETADDR
1723// to. This is achieved by reserving an area the size of the argument delta
1724// right after the original RETADDR, but before the saved framepointer or the
1725// spilled registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1726// stack layout:
1727//   arg1
1728//   arg2
1729//   RETADDR
1730//   [ new RETADDR
1731//     move area ]
1732//   (possible EBP)
1733//   ESI
1734//   EDI
1735//   local1 ..
1736
1737/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 bytes
1738/// for a 16 byte alignment requirement.
1739unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1740                                                        SelectionDAG& DAG) {
1741  if (PerformTailCallOpt) {
1742    MachineFunction &MF = DAG.getMachineFunction();
1743    const TargetMachine &TM = MF.getTarget();
1744    const TargetFrameInfo &TFI = *TM.getFrameInfo();
1745    unsigned StackAlignment = TFI.getStackAlignment();
1746    uint64_t AlignMask = StackAlignment - 1;
1747    int64_t Offset = StackSize;
1748    unsigned SlotSize = Subtarget->is64Bit() ?
8 : 4;
1749    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1750      // Number smaller than 12 so just add the difference.
1751      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1752    } else {
1753      // Mask out lower bits, add stackalignment once plus the 12 bytes.
1754      Offset = ((~AlignMask) & Offset) + StackAlignment +
1755               (StackAlignment-SlotSize);
1756    }
1757    StackSize = Offset;
1758  }
1759  return StackSize;
1760}
1761
1762/// IsEligibleForTailCallOptimization - Check to see whether the next
1763/// instruction following the call is a return. A function is eligible if
1764/// caller/callee calling conventions match, currently only fastcc supports
1765/// tail calls, and the function CALL is immediately followed by a RET.
1766bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1767                                                          SDOperand Ret,
1768                                                          SelectionDAG& DAG) const {
1769  if (!PerformTailCallOpt)
1770    return false;
1771
1772  // Check whether the CALL node immediately precedes the RET node and whether
1773  // the return uses the result of the node or is a void return.
1774  unsigned NumOps = Ret.getNumOperands();
1775  if ((NumOps == 1 &&
1776       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1777        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1778      (NumOps > 1 &&
1779       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1780       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
1781    MachineFunction &MF = DAG.getMachineFunction();
1782    unsigned CallerCC = MF.getFunction()->getCallingConv();
1783    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1784    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1785      SDOperand Callee = Call.getOperand(4);
1786      // On elf/pic %ebx needs to be livein.
1787      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
1788          !Subtarget->isPICStyleGOT())
1789        return true;
1790
1791      // Can only do local tail calls with PIC.
1792      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1793        return G->getGlobal()->hasHiddenVisibility()
1794            || G->getGlobal()->hasProtectedVisibility();
1795    }
1796  }
1797
1798  return false;
1799}
1800
1801//===----------------------------------------------------------------------===//
1802//                           Other Lowering Hooks
1803//===----------------------------------------------------------------------===//
1804
1805
1806SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1807  MachineFunction &MF = DAG.getMachineFunction();
1808  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1809  int ReturnAddrIndex = FuncInfo->getRAIndex();
1810
1811  if (ReturnAddrIndex == 0) {
1812    // Set up a frame object for the return address.
1813    if (Subtarget->is64Bit())
1814      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
1815    else
1816      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1817
1818    FuncInfo->setRAIndex(ReturnAddrIndex);
1819  }
1820
1821  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
1822}
1823
1824
1825
1826/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
1827/// specific condition code. It returns false if it cannot do a direct
1828/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
1829/// needed.
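/// For example, (setcc X, -1, setgt) becomes a compare of X against 0 with
/// X86::COND_NS, and FP conditions without a direct flag encoding (e.g.
/// SETOLT) are handled by swapping LHS and RHS.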
1830static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
1831                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
1832                           SelectionDAG &DAG) {
1833  X86CC = X86::COND_INVALID;
1834  if (!isFP) {
1835    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1836      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1837        // X > -1   -> X == 0, jump !sign.
1838        RHS = DAG.getConstant(0, RHS.getValueType());
1839        X86CC = X86::COND_NS;
1840        return true;
1841      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1842        // X < 0   -> X == 0, jump on sign.
1843        X86CC = X86::COND_S;
1844        return true;
1845      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1846        // X < 1   -> X <= 0
1847        RHS = DAG.getConstant(0, RHS.getValueType());
1848        X86CC = X86::COND_LE;
1849        return true;
1850      }
1851    }
1852
1853    switch (SetCCOpcode) {
1854    default: break;
1855    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
1856    case ISD::SETGT:  X86CC = X86::COND_G;  break;
1857    case ISD::SETGE:  X86CC = X86::COND_GE; break;
1858    case ISD::SETLT:  X86CC = X86::COND_L;  break;
1859    case ISD::SETLE:  X86CC = X86::COND_LE; break;
1860    case ISD::SETNE:  X86CC = X86::COND_NE; break;
1861    case ISD::SETULT: X86CC = X86::COND_B;  break;
1862    case ISD::SETUGT: X86CC = X86::COND_A;  break;
1863    case ISD::SETULE: X86CC = X86::COND_BE; break;
1864    case ISD::SETUGE: X86CC = X86::COND_AE; break;
1865    }
1866  } else {
1867    // On a floating point condition, the flags are set as follows:
1868    // ZF  PF  CF   op
1869    //  0 | 0 | 0 | X > Y
1870    //  0 | 0 | 1 | X < Y
1871    //  1 | 0 | 0 | X == Y
1872    //  1 | 1 | 1 | unordered
1873    bool Flip = false;
1874    switch (SetCCOpcode) {
1875    default: break;
1876    case ISD::SETUEQ:
1877    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
1878    case ISD::SETOLT: Flip = true; // Fallthrough
1879    case ISD::SETOGT:
1880    case ISD::SETGT:  X86CC = X86::COND_A;  break;
1881    case ISD::SETOLE: Flip = true; // Fallthrough
1882    case ISD::SETOGE:
1883    case ISD::SETGE:  X86CC = X86::COND_AE; break;
1884    case ISD::SETUGT: Flip = true; // Fallthrough
1885    case ISD::SETULT:
1886    case ISD::SETLT:  X86CC = X86::COND_B;  break;
1887    case ISD::SETUGE: Flip = true; // Fallthrough
1888    case ISD::SETULE:
1889    case ISD::SETLE:  X86CC = X86::COND_BE; break;
1890    case ISD::SETONE:
1891    case ISD::SETNE:  X86CC = X86::COND_NE; break;
1892    case ISD::SETUO:  X86CC = X86::COND_P;  break;
1893    case ISD::SETO:   X86CC = X86::COND_NP; break;
1894    }
1895    if (Flip)
1896      std::swap(LHS, RHS);
1897  }
1898
1899  return X86CC != X86::COND_INVALID;
1900}
1901
1902/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
1903/// code? The current x86 isa includes the following FP cmov instructions:
1904/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1905static bool hasFPCMov(unsigned X86CC) {
1906  switch (X86CC) {
1907  default:
1908    return false;
1909  case X86::COND_B:
1910  case X86::COND_BE:
1911  case X86::COND_E:
1912  case X86::COND_P:
1913  case X86::COND_A:
1914  case X86::COND_AE:
1915  case X86::COND_NE:
1916  case X86::COND_NP:
1917    return true;
1918  }
1919}
1920
1921/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
1922/// true if Op is undef or if its value falls within the half-open range [Low, Hi).
1923static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
1924  if (Op.getOpcode() == ISD::UNDEF)
1925    return true;
1926
1927  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
1928  return (Val >= Low && Val < Hi);
1929}
1930
1931/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
1932/// true if Op is undef or if its value is equal to the specified value.
1933static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
1934  if (Op.getOpcode() == ISD::UNDEF)
1935    return true;
1936  return cast<ConstantSDNode>(Op)->getValue() == Val;
1937}
1938
1939/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1940/// specifies a shuffle of elements that is suitable for input to PSHUFD.
1941bool X86::isPSHUFDMask(SDNode *N) {
1942  assert(N->getOpcode() == ISD::BUILD_VECTOR);
1943
1944  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
1945    return false;
1946
1947  // Check that the mask doesn't reference the second vector.
1948  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1949    SDOperand Arg = N->getOperand(i);
1950    if (Arg.getOpcode() == ISD::UNDEF) continue;
1951    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1952    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
1953      return false;
1954  }
1955
1956  return true;
1957}
1958
1959/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
1960/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
1961bool X86::isPSHUFHWMask(SDNode *N) {
1962  assert(N->getOpcode() == ISD::BUILD_VECTOR);
1963
1964  if (N->getNumOperands() != 8)
1965    return false;
1966
1967  // Lower quadword copied in order.
1968  for (unsigned i = 0; i != 4; ++i) {
1969    SDOperand Arg = N->getOperand(i);
1970    if (Arg.getOpcode() == ISD::UNDEF) continue;
1971    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1972    if (cast<ConstantSDNode>(Arg)->getValue() != i)
1973      return false;
1974  }
1975
1976  // Upper quadword shuffled.
1977  for (unsigned i = 4; i != 8; ++i) {
1978    SDOperand Arg = N->getOperand(i);
1979    if (Arg.getOpcode() == ISD::UNDEF) continue;
1980    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1981    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1982    if (Val < 4 || Val > 7)
1983      return false;
1984  }
1985
1986  return true;
1987}
1988
1989/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
1990/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
1991bool X86::isPSHUFLWMask(SDNode *N) {
1992  assert(N->getOpcode() == ISD::BUILD_VECTOR);
1993
1994  if (N->getNumOperands() != 8)
1995    return false;
1996
1997  // Upper quadword copied in order.
1998  for (unsigned i = 4; i != 8; ++i)
1999    if (!isUndefOrEqual(N->getOperand(i), i))
2000      return false;
2001
2002  // Lower quadword shuffled.
2003  for (unsigned i = 0; i != 4; ++i)
2004    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
2005      return false;
2006
2007  return true;
2008}
2009
2010/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2011/// specifies a shuffle of elements that is suitable for input to SHUFP*.
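/// For example, for v4f32 the mask <3, 2, 5, 4> qualifies: the low half
/// selects elements of V1 and the high half selects elements of V2.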
2012static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
2013  if (NumElems != 2 && NumElems != 4) return false;
2014
2015  unsigned Half = NumElems / 2;
2016  for (unsigned i = 0; i < Half; ++i)
2017    if (!isUndefOrInRange(Elems[i], 0, NumElems))
2018      return false;
2019  for (unsigned i = Half; i < NumElems; ++i)
2020    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
2021      return false;
2022
2023  return true;
2024}
2025
2026bool X86::isSHUFPMask(SDNode *N) {
2027  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2028  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
2029}
2030
2031/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
2032/// the reverse of what x86 shuffles want. x86 shuffles require the lower
2033/// half elements to come from vector 1 (which would equal the dest.) and
2034/// the upper half to come from vector 2.
2035static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
2036  if (NumOps != 2 && NumOps != 4) return false;
2037
2038  unsigned Half = NumOps / 2;
2039  for (unsigned i = 0; i < Half; ++i)
2040    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
2041      return false;
2042  for (unsigned i = Half; i < NumOps; ++i)
2043    if (!isUndefOrInRange(Ops[i], 0, NumOps))
2044      return false;
2045  return true;
2046}
2047
2048static bool isCommutedSHUFP(SDNode *N) {
2049  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2050  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
2051}
2052
2053/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2054/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
2055bool X86::isMOVHLPSMask(SDNode *N) {
2056  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2057
2058  if (N->getNumOperands() != 4)
2059    return false;
2060
2061  // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3.
2062  return isUndefOrEqual(N->getOperand(0), 6) &&
2063         isUndefOrEqual(N->getOperand(1), 7) &&
2064         isUndefOrEqual(N->getOperand(2), 2) &&
2065         isUndefOrEqual(N->getOperand(3), 3);
2066}
2067
2068/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
2069/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
2070/// <2, 3, 2, 3>
2071bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
2072  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2073
2074  if (N->getNumOperands() != 4)
2075    return false;
2076
2077  // Expect element 0 == 2, element 1 == 3, element 2 == 2, element 3 == 3.
2078  return isUndefOrEqual(N->getOperand(0), 2) &&
2079         isUndefOrEqual(N->getOperand(1), 3) &&
2080         isUndefOrEqual(N->getOperand(2), 2) &&
2081         isUndefOrEqual(N->getOperand(3), 3);
2082}
2083
2084/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2085/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
2086bool X86::isMOVLPMask(SDNode *N) {
2087  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2088
2089  unsigned NumElems = N->getNumOperands();
2090  if (NumElems != 2 && NumElems != 4)
2091    return false;
2092
2093  for (unsigned i = 0; i < NumElems/2; ++i)
2094    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
2095      return false;
2096
2097  for (unsigned i = NumElems/2; i < NumElems; ++i)
2098    if (!isUndefOrEqual(N->getOperand(i), i))
2099      return false;
2100
2101  return true;
2102}
2103
2104/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
2105/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2106/// and MOVLHPS.
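/// For example, for v4f32 the canonical MOVLHPS mask is <0, 1, 4, 5>.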
2107bool X86::isMOVHPMask(SDNode *N) { 2108 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2109 2110 unsigned NumElems = N->getNumOperands(); 2111 if (NumElems != 2 && NumElems != 4) 2112 return false; 2113 2114 for (unsigned i = 0; i < NumElems/2; ++i) 2115 if (!isUndefOrEqual(N->getOperand(i), i)) 2116 return false; 2117 2118 for (unsigned i = 0; i < NumElems/2; ++i) { 2119 SDOperand Arg = N->getOperand(i + NumElems/2); 2120 if (!isUndefOrEqual(Arg, i + NumElems)) 2121 return false; 2122 } 2123 2124 return true; 2125} 2126 2127/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2128/// specifies a shuffle of elements that is suitable for input to UNPCKL. 2129bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2130 bool V2IsSplat = false) { 2131 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2132 return false; 2133 2134 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2135 SDOperand BitI = Elts[i]; 2136 SDOperand BitI1 = Elts[i+1]; 2137 if (!isUndefOrEqual(BitI, j)) 2138 return false; 2139 if (V2IsSplat) { 2140 if (isUndefOrEqual(BitI1, NumElts)) 2141 return false; 2142 } else { 2143 if (!isUndefOrEqual(BitI1, j + NumElts)) 2144 return false; 2145 } 2146 } 2147 2148 return true; 2149} 2150 2151bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2152 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2153 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2154} 2155 2156/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2157/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2158bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2159 bool V2IsSplat = false) { 2160 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2161 return false; 2162 2163 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2164 SDOperand BitI = Elts[i]; 2165 SDOperand BitI1 = Elts[i+1]; 2166 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2167 return false; 2168 if (V2IsSplat) { 2169 if (isUndefOrEqual(BitI1, NumElts)) 2170 return false; 2171 } else { 2172 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2173 return false; 2174 } 2175 } 2176 2177 return true; 2178} 2179 2180bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2181 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2182 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2183} 2184 2185/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2186/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2187/// <0, 0, 1, 1> 2188bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2189 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2190 2191 unsigned NumElems = N->getNumOperands(); 2192 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2193 return false; 2194 2195 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2196 SDOperand BitI = N->getOperand(i); 2197 SDOperand BitI1 = N->getOperand(i+1); 2198 2199 if (!isUndefOrEqual(BitI, j)) 2200 return false; 2201 if (!isUndefOrEqual(BitI1, j)) 2202 return false; 2203 } 2204 2205 return true; 2206} 2207 2208/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2209/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef,
2210/// <2, 2, 3, 3>
2211bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
2212  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2213
2214  unsigned NumElems = N->getNumOperands();
2215  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2216    return false;
2217
2218  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
2219    SDOperand BitI  = N->getOperand(i);
2220    SDOperand BitI1 = N->getOperand(i + 1);
2221
2222    if (!isUndefOrEqual(BitI, j))
2223      return false;
2224    if (!isUndefOrEqual(BitI1, j))
2225      return false;
2226  }
2227
2228  return true;
2229}
2230
2231/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2232/// specifies a shuffle of elements that is suitable for input to MOVSS,
2233/// MOVSD, and MOVD, i.e. setting the lowest element.
2234static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
2235  if (NumElts != 2 && NumElts != 4)
2236    return false;
2237
2238  if (!isUndefOrEqual(Elts[0], NumElts))
2239    return false;
2240
2241  for (unsigned i = 1; i < NumElts; ++i) {
2242    if (!isUndefOrEqual(Elts[i], i))
2243      return false;
2244  }
2245
2246  return true;
2247}
2248
2249bool X86::isMOVLMask(SDNode *N) {
2250  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2251  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
2252}
2253
2254/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
2255/// x86 movss wants. X86 movss requires the lowest element to be the lowest
2256/// element of vector 2 and the other elements to come from vector 1 in order.
2257static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
2258                           bool V2IsSplat = false,
2259                           bool V2IsUndef = false) {
2260  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
2261    return false;
2262
2263  if (!isUndefOrEqual(Ops[0], 0))
2264    return false;
2265
2266  for (unsigned i = 1; i < NumOps; ++i) {
2267    SDOperand Arg = Ops[i];
2268    if (!(isUndefOrEqual(Arg, i+NumOps) ||
2269          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
2270          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
2271      return false;
2272  }
2273
2274  return true;
2275}
2276
2277static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2278                           bool V2IsUndef = false) {
2279  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2280  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
2281                        V2IsSplat, V2IsUndef);
2282}
2283
2284/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2285/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2286bool X86::isMOVSHDUPMask(SDNode *N) {
2287  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2288
2289  if (N->getNumOperands() != 4)
2290    return false;
2291
2292  // Expect 1, 1, 3, 3
2293  for (unsigned i = 0; i < 2; ++i) {
2294    SDOperand Arg = N->getOperand(i);
2295    if (Arg.getOpcode() == ISD::UNDEF) continue;
2296    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2297    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2298    if (Val != 1) return false;
2299  }
2300
2301  bool HasHi = false;
2302  for (unsigned i = 2; i < 4; ++i) {
2303    SDOperand Arg = N->getOperand(i);
2304    if (Arg.getOpcode() == ISD::UNDEF) continue;
2305    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2306    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2307    if (Val != 3) return false;
2308    HasHi = true;
2309  }
2310
2311  // Don't use movshdup if it can be done with a shufps.
2312  return HasHi;
2313}
2314
2315/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2316/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
2317bool X86::isMOVSLDUPMask(SDNode *N) {
2318  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2319
2320  if (N->getNumOperands() != 4)
2321    return false;
2322
2323  // Expect 0, 0, 2, 2
2324  for (unsigned i = 0; i < 2; ++i) {
2325    SDOperand Arg = N->getOperand(i);
2326    if (Arg.getOpcode() == ISD::UNDEF) continue;
2327    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2328    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2329    if (Val != 0) return false;
2330  }
2331
2332  bool HasHi = false;
2333  for (unsigned i = 2; i < 4; ++i) {
2334    SDOperand Arg = N->getOperand(i);
2335    if (Arg.getOpcode() == ISD::UNDEF) continue;
2336    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2337    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2338    if (Val != 2) return false;
2339    HasHi = true;
2340  }
2341
2342  // Don't use movsldup if it can be done with a shufps.
2343  return HasHi;
2344}
2345
2346/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2347/// specifies an identity operation on the LHS or RHS.
2348static bool isIdentityMask(SDNode *N, bool RHS = false) {
2349  unsigned NumElems = N->getNumOperands();
2350  for (unsigned i = 0; i < NumElems; ++i)
2351    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2352      return false;
2353  return true;
2354}
2355
2356/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2357/// a splat of a single element.
2358static bool isSplatMask(SDNode *N) {
2359  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2360
2361  // This is a splat operation if each element of the permute is the same, and
2362  // if the value doesn't reference the second vector.
2363  unsigned NumElems = N->getNumOperands();
2364  SDOperand ElementBase;
2365  unsigned i = 0;
2366  for (; i != NumElems; ++i) {
2367    SDOperand Elt = N->getOperand(i);
2368    if (isa<ConstantSDNode>(Elt)) {
2369      ElementBase = Elt;
2370      break;
2371    }
2372  }
2373
2374  if (!ElementBase.Val)
2375    return false;
2376
2377  for (; i != NumElems; ++i) {
2378    SDOperand Arg = N->getOperand(i);
2379    if (Arg.getOpcode() == ISD::UNDEF) continue;
2380    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2381    if (Arg != ElementBase) return false;
2382  }
2383
2384  // Make sure it is a splat of the first vector operand.
2385  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2386}
2387
2388/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2389/// a splat of a single element and it's a 2 or 4 element mask.
2390bool X86::isSplatMask(SDNode *N) {
2391  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2392
2393  // We can only splat 64-bit, and 32-bit quantities with a single instruction.
2394  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2395    return false;
2396  return ::isSplatMask(N);
2397}
2398
2399/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2400/// specifies a splat of element zero.
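/// For example, <0, 0, 0, 0> (with undefs allowed in any position) matches.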
2401bool X86::isSplatLoMask(SDNode *N) {
2402  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2403
2404  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2405    if (!isUndefOrEqual(N->getOperand(i), 0))
2406      return false;
2407  return true;
2408}
2409
2410/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2411/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2412/// instructions.
2413unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2414  unsigned NumOperands = N->getNumOperands();
2415  unsigned Shift = (NumOperands == 4) ? 2 : 1;
2416  unsigned Mask = 0;
2417  for (unsigned i = 0; i < NumOperands; ++i) {
2418    unsigned Val = 0;
2419    SDOperand Arg = N->getOperand(NumOperands-i-1);
2420    if (Arg.getOpcode() != ISD::UNDEF)
2421      Val = cast<ConstantSDNode>(Arg)->getValue();
2422    if (Val >= NumOperands) Val -= NumOperands;
2423    Mask |= Val;
2424    if (i != NumOperands - 1)
2425      Mask <<= Shift;
2426  }
2427
2428  return Mask;
2429}
2430
2431/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2432/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
2433/// instruction.
2434unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2435  unsigned Mask = 0;
2436  // 8 nodes, but we only care about the last 4.
2437  for (unsigned i = 7; i >= 4; --i) {
2438    unsigned Val = 0;
2439    SDOperand Arg = N->getOperand(i);
2440    if (Arg.getOpcode() != ISD::UNDEF)
2441      Val = cast<ConstantSDNode>(Arg)->getValue();
2442    Mask |= (Val - 4);
2443    if (i != 4)
2444      Mask <<= 2;
2445  }
2446
2447  return Mask;
2448}
2449
2450/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2451/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
2452/// instruction.
2453unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2454  unsigned Mask = 0;
2455  // 8 nodes, but we only care about the first 4.
2456  for (int i = 3; i >= 0; --i) {
2457    unsigned Val = 0;
2458    SDOperand Arg = N->getOperand(i);
2459    if (Arg.getOpcode() != ISD::UNDEF)
2460      Val = cast<ConstantSDNode>(Arg)->getValue();
2461    Mask |= Val;
2462    if (i != 0)
2463      Mask <<= 2;
2464  }
2465
2466  return Mask;
2467}
2468
2469/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2470/// specifies an 8 element shuffle that can be broken into a pair of
2471/// PSHUFHW and PSHUFLW.
2472static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2473  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2474
2475  if (N->getNumOperands() != 8)
2476    return false;
2477
2478  // Lower quadword shuffled.
2479  for (unsigned i = 0; i != 4; ++i) {
2480    SDOperand Arg = N->getOperand(i);
2481    if (Arg.getOpcode() == ISD::UNDEF) continue;
2482    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2483    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2484    if (Val >= 4)
2485      return false;
2486  }
2487
2488  // Upper quadword shuffled.
2489  for (unsigned i = 4; i != 8; ++i) {
2490    SDOperand Arg = N->getOperand(i);
2491    if (Arg.getOpcode() == ISD::UNDEF) continue;
2492    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2493    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2494    if (Val < 4 || Val > 7)
2495      return false;
2496  }
2497
2498  return true;
2499}
2500
2501/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
2502/// values in their permute mask.
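/// For example, shuffle(V1, V2, <0, 1, 4, 5>) becomes
/// shuffle(V2, V1, <4, 5, 0, 1>).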
2503static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2504 SDOperand &V2, SDOperand &Mask, 2505 SelectionDAG &DAG) { 2506 MVT::ValueType VT = Op.getValueType(); 2507 MVT::ValueType MaskVT = Mask.getValueType(); 2508 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2509 unsigned NumElems = Mask.getNumOperands(); 2510 SmallVector<SDOperand, 8> MaskVec; 2511 2512 for (unsigned i = 0; i != NumElems; ++i) { 2513 SDOperand Arg = Mask.getOperand(i); 2514 if (Arg.getOpcode() == ISD::UNDEF) { 2515 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2516 continue; 2517 } 2518 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2519 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2520 if (Val < NumElems) 2521 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2522 else 2523 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2524 } 2525 2526 std::swap(V1, V2); 2527 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2528 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2529} 2530 2531/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2532/// the two vector operands have swapped position. 2533static 2534SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2535 MVT::ValueType MaskVT = Mask.getValueType(); 2536 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2537 unsigned NumElems = Mask.getNumOperands(); 2538 SmallVector<SDOperand, 8> MaskVec; 2539 for (unsigned i = 0; i != NumElems; ++i) { 2540 SDOperand Arg = Mask.getOperand(i); 2541 if (Arg.getOpcode() == ISD::UNDEF) { 2542 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2543 continue; 2544 } 2545 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2546 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2547 if (Val < NumElems) 2548 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2549 else 2550 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2551 } 2552 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2553} 2554 2555 2556/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2557/// match movhlps. The lower half elements should come from upper half of 2558/// V1 (and in order), and the upper half elements should come from the upper 2559/// half of V2 (and in order). 2560static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2561 unsigned NumElems = Mask->getNumOperands(); 2562 if (NumElems != 4) 2563 return false; 2564 for (unsigned i = 0, e = 2; i != e; ++i) 2565 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2566 return false; 2567 for (unsigned i = 2; i != 4; ++i) 2568 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2569 return false; 2570 return true; 2571} 2572 2573/// isScalarLoadToVector - Returns true if the node is a scalar load that 2574/// is promoted to a vector. 2575static inline bool isScalarLoadToVector(SDNode *N) { 2576 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2577 N = N->getOperand(0).Val; 2578 return ISD::isNON_EXTLoad(N); 2579 } 2580 return false; 2581} 2582 2583/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2584/// match movlp{s|d}. The lower half elements should come from lower half of 2585/// V1 (and in order), and the upper half elements should come from the upper 2586/// half of V2 (and in order). And since V1 will become the source of the 2587/// MOVLP, it must be either a vector load or a scalar load to vector. 
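/// For example, for v4f32 the mask <0, 1, 6, 7> qualifies when V1 is a
/// (scalar or vector) load and V2 is not a vector load.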
2588static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2589  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2590    return false;
2591  // If V2 is a vector load, don't do this transformation; we will instead try
2592  // to fold the load into a shufps op.
2593  if (ISD::isNON_EXTLoad(V2))
2594    return false;
2595
2596  unsigned NumElems = Mask->getNumOperands();
2597  if (NumElems != 2 && NumElems != 4)
2598    return false;
2599  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2600    if (!isUndefOrEqual(Mask->getOperand(i), i))
2601      return false;
2602  for (unsigned i = NumElems/2; i != NumElems; ++i)
2603    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2604      return false;
2605  return true;
2606}
2607
2608/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2609/// all the same.
2610static bool isSplatVector(SDNode *N) {
2611  if (N->getOpcode() != ISD::BUILD_VECTOR)
2612    return false;
2613
2614  SDOperand SplatValue = N->getOperand(0);
2615  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2616    if (N->getOperand(i) != SplatValue)
2617      return false;
2618  return true;
2619}
2620
2621/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2622/// to an undef.
2623static bool isUndefShuffle(SDNode *N) {
2624  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2625    return false;
2626
2627  SDOperand V1 = N->getOperand(0);
2628  SDOperand V2 = N->getOperand(1);
2629  SDOperand Mask = N->getOperand(2);
2630  unsigned NumElems = Mask.getNumOperands();
2631  for (unsigned i = 0; i != NumElems; ++i) {
2632    SDOperand Arg = Mask.getOperand(i);
2633    if (Arg.getOpcode() != ISD::UNDEF) {
2634      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2635      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2636        return false;
2637      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2638        return false;
2639    }
2640  }
2641  return true;
2642}
2643
2644/// isZeroNode - Returns true if Elt is a constant zero or a floating point
2645/// constant +0.0.
2646static inline bool isZeroNode(SDOperand Elt) {
2647  return ((isa<ConstantSDNode>(Elt) &&
2648           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2649          (isa<ConstantFPSDNode>(Elt) &&
2650           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
2651}
2652
2653/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2654/// to a zero vector.
2655static bool isZeroShuffle(SDNode *N) {
2656  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2657    return false;
2658
2659  SDOperand V1 = N->getOperand(0);
2660  SDOperand V2 = N->getOperand(1);
2661  SDOperand Mask = N->getOperand(2);
2662  unsigned NumElems = Mask.getNumOperands();
2663  for (unsigned i = 0; i != NumElems; ++i) {
2664    SDOperand Arg = Mask.getOperand(i);
2665    if (Arg.getOpcode() == ISD::UNDEF)
2666      continue;
2667
2668    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
2669    if (Idx < NumElems) {
2670      unsigned Opc = V1.Val->getOpcode();
2671      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
2672        continue;
2673      if (Opc != ISD::BUILD_VECTOR ||
2674          !isZeroNode(V1.Val->getOperand(Idx)))
2675        return false;
2676    } else if (Idx >= NumElems) {
2677      unsigned Opc = V2.Val->getOpcode();
2678      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
2679        continue;
2680      if (Opc != ISD::BUILD_VECTOR ||
2681          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
2682        return false;
2683    }
2684  }
2685  return true;
2686}
2687
2688/// getZeroVector - Returns a vector of specified type with all zero elements.
2689///
2690static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
2691  assert(MVT::isVector(VT) && "Expected a vector type");
2692
2693  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their
2694  // dest type. This ensures they get CSE'd.
2695  SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
2696  SDOperand Vec;
2697  if (MVT::getSizeInBits(VT) == 64)  // MMX
2698    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2699  else                               // SSE
2700    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2701  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
2702}
2703
2704/// getOnesVector - Returns a vector of specified type with all bits set.
2705///
2706static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
2707  assert(MVT::isVector(VT) && "Expected a vector type");
2708
2709  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their
2710  // dest type. This ensures they get CSE'd.
2711  SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
2712  SDOperand Vec;
2713  if (MVT::getSizeInBits(VT) == 64)  // MMX
2714    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2715  else                               // SSE
2716    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2717  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
2718}
2719
2720
2721/// NormalizeMask - V2 is a splat, so modify the mask (if needed) so that all
2722/// elements that point to V2 point to its first element.
2723static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
2724  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);
2725
2726  bool Changed = false;
2727  SmallVector<SDOperand, 8> MaskVec;
2728  unsigned NumElems = Mask.getNumOperands();
2729  for (unsigned i = 0; i != NumElems; ++i) {
2730    SDOperand Arg = Mask.getOperand(i);
2731    if (Arg.getOpcode() != ISD::UNDEF) {
2732      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2733      if (Val > NumElems) {
2734        Arg = DAG.getConstant(NumElems, Arg.getValueType());
2735        Changed = true;
2736      }
2737    }
2738    MaskVec.push_back(Arg);
2739  }
2740
2741  if (Changed)
2742    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
2743                       &MaskVec[0], MaskVec.size());
2744  return Mask;
2745}
2746
2747/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
2748/// operation of specified width.
2749static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
2750  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2751  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2752
2753  SmallVector<SDOperand, 8> MaskVec;
2754  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
2755  for (unsigned i = 1; i != NumElems; ++i)
2756    MaskVec.push_back(DAG.getConstant(i, BaseVT));
2757  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2758}
2759
2760/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
2761/// of specified width.
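/// For example, getUnpacklMask(4, DAG) produces the mask <0, 4, 1, 5>.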
2762static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
2763  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2764  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2765  SmallVector<SDOperand, 8> MaskVec;
2766  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
2767    MaskVec.push_back(DAG.getConstant(i, BaseVT));
2768    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
2769  }
2770  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2771}
2772
2773/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
2774/// of specified width.
2775static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
2776  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2777  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2778  unsigned Half = NumElems/2;
2779  SmallVector<SDOperand, 8> MaskVec;
2780  for (unsigned i = 0; i != Half; ++i) {
2781    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
2782    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
2783  }
2784  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2785}
2786
2787/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
2788///
2789static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
2790  SDOperand V1 = Op.getOperand(0);
2791  SDOperand Mask = Op.getOperand(2);
2792  MVT::ValueType VT = Op.getValueType();
2793  unsigned NumElems = Mask.getNumOperands();
2794  Mask = getUnpacklMask(NumElems, DAG);
2795  while (NumElems != 4) {
2796    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
2797    NumElems >>= 1;
2798  }
2799  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
2800
2801  Mask = getZeroVector(MVT::v4i32, DAG);
2802  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
2803                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
2804  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
2805}
2806
2807/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
2808/// vector against a zero or undef vector. This produces a shuffle where the
2809/// low element of V2 is swizzled into the zero/undef vector, landing at element
2810/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
2811static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
2812                                             unsigned NumElems, unsigned Idx,
2813                                             bool isZero, SelectionDAG &DAG) {
2814  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
2815  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2816  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
2817  SmallVector<SDOperand, 16> MaskVec;
2818  for (unsigned i = 0; i != NumElems; ++i)
2819    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
2820      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
2821    else
2822      MaskVec.push_back(DAG.getConstant(i, EVT));
2823  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2824                               &MaskVec[0], MaskVec.size());
2825  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2826}
2827
2828/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
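/// Sketch of the strategy: adjacent byte pairs are zero-extended to i16, the
/// odd byte is shifted left by 8 and OR'd with the even byte, and each result
/// is inserted into a v8i16 that is finally bitcast back to v16i8.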
2829/// 2830static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2831 unsigned NumNonZero, unsigned NumZero, 2832 SelectionDAG &DAG, TargetLowering &TLI) { 2833 if (NumNonZero > 8) 2834 return SDOperand(); 2835 2836 SDOperand V(0, 0); 2837 bool First = true; 2838 for (unsigned i = 0; i < 16; ++i) { 2839 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2840 if (ThisIsNonZero && First) { 2841 if (NumZero) 2842 V = getZeroVector(MVT::v8i16, DAG); 2843 else 2844 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2845 First = false; 2846 } 2847 2848 if ((i & 1) != 0) { 2849 SDOperand ThisElt(0, 0), LastElt(0, 0); 2850 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2851 if (LastIsNonZero) { 2852 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2853 } 2854 if (ThisIsNonZero) { 2855 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2856 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2857 ThisElt, DAG.getConstant(8, MVT::i8)); 2858 if (LastIsNonZero) 2859 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2860 } else 2861 ThisElt = LastElt; 2862 2863 if (ThisElt.Val) 2864 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2865 DAG.getIntPtrConstant(i/2)); 2866 } 2867 } 2868 2869 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2870} 2871 2872/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2873/// 2874static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2875 unsigned NumNonZero, unsigned NumZero, 2876 SelectionDAG &DAG, TargetLowering &TLI) { 2877 if (NumNonZero > 4) 2878 return SDOperand(); 2879 2880 SDOperand V(0, 0); 2881 bool First = true; 2882 for (unsigned i = 0; i < 8; ++i) { 2883 bool isNonZero = (NonZeros & (1 << i)) != 0; 2884 if (isNonZero) { 2885 if (First) { 2886 if (NumZero) 2887 V = getZeroVector(MVT::v8i16, DAG); 2888 else 2889 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2890 First = false; 2891 } 2892 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2893 DAG.getIntPtrConstant(i)); 2894 } 2895 } 2896 2897 return V; 2898} 2899 2900SDOperand 2901X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2902 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 2903 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 2904 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 2905 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 2906 // eliminated on x86-32 hosts. 
2907 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 2908 return Op; 2909 2910 if (ISD::isBuildVectorAllOnes(Op.Val)) 2911 return getOnesVector(Op.getValueType(), DAG); 2912 return getZeroVector(Op.getValueType(), DAG); 2913 } 2914 2915 MVT::ValueType VT = Op.getValueType(); 2916 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2917 unsigned EVTBits = MVT::getSizeInBits(EVT); 2918 2919 unsigned NumElems = Op.getNumOperands(); 2920 unsigned NumZero = 0; 2921 unsigned NumNonZero = 0; 2922 unsigned NonZeros = 0; 2923 bool HasNonImms = false; 2924 SmallSet<SDOperand, 8> Values; 2925 for (unsigned i = 0; i < NumElems; ++i) { 2926 SDOperand Elt = Op.getOperand(i); 2927 if (Elt.getOpcode() == ISD::UNDEF) 2928 continue; 2929 Values.insert(Elt); 2930 if (Elt.getOpcode() != ISD::Constant && 2931 Elt.getOpcode() != ISD::ConstantFP) 2932 HasNonImms = true; 2933 if (isZeroNode(Elt)) 2934 NumZero++; 2935 else { 2936 NonZeros |= (1 << i); 2937 NumNonZero++; 2938 } 2939 } 2940 2941 if (NumNonZero == 0) { 2942 // All undef vector. Return an UNDEF. All zero vectors were handled above. 2943 return DAG.getNode(ISD::UNDEF, VT); 2944 } 2945 2946 // Splat is obviously ok. Let legalizer expand it to a shuffle. 2947 if (Values.size() == 1) 2948 return SDOperand(); 2949 2950 // Special case for single non-zero element. 2951 if (NumNonZero == 1 && NumElems <= 4) { 2952 unsigned Idx = CountTrailingZeros_32(NonZeros); 2953 SDOperand Item = Op.getOperand(Idx); 2954 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2955 if (Idx == 0) 2956 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 2957 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 2958 NumZero > 0, DAG); 2959 else if (!HasNonImms) // Otherwise, it's better to do a constpool load. 2960 return SDOperand(); 2961 2962 if (EVTBits == 32) { 2963 // Turn it into a shuffle of zero and zero-extended scalar to vector. 2964 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 2965 DAG); 2966 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2967 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 2968 SmallVector<SDOperand, 8> MaskVec; 2969 for (unsigned i = 0; i < NumElems; i++) 2970 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 2971 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2972 &MaskVec[0], MaskVec.size()); 2973 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 2974 DAG.getNode(ISD::UNDEF, VT), Mask); 2975 } 2976 } 2977 2978 // A vector full of immediates; various special cases are already 2979 // handled, so this is best done with a single constant-pool load. 2980 if (!HasNonImms) 2981 return SDOperand(); 2982 2983 // Let legalizer expand 2-wide build_vectors. 2984 if (EVTBits == 64) 2985 return SDOperand(); 2986 2987 // If element VT is < 32 bits, convert it to inserts into a zero vector. 2988 if (EVTBits == 8 && NumElems == 16) { 2989 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 2990 *this); 2991 if (V.Val) return V; 2992 } 2993 2994 if (EVTBits == 16 && NumElems == 8) { 2995 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 2996 *this); 2997 if (V.Val) return V; 2998 } 2999 3000 // If element VT is == 32 bits, turn it into a number of shuffles. 
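  // e.g. for <a, 0, b, 0> each nonzero element becomes a scalar_to_vector,
  // each pair is combined with a MOVL/unpckl-style shuffle against the zero
  // vector, and a final shuffle merges the two intermediate halves.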
3001 SmallVector<SDOperand, 8> V; 3002 V.resize(NumElems); 3003 if (NumElems == 4 && NumZero > 0) { 3004 for (unsigned i = 0; i < 4; ++i) { 3005 bool isZero = !(NonZeros & (1 << i)); 3006 if (isZero) 3007 V[i] = getZeroVector(VT, DAG); 3008 else 3009 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3010 } 3011 3012 for (unsigned i = 0; i < 2; ++i) { 3013 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3014 default: break; 3015 case 0: 3016 V[i] = V[i*2]; // Must be a zero vector. 3017 break; 3018 case 1: 3019 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3020 getMOVLMask(NumElems, DAG)); 3021 break; 3022 case 2: 3023 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3024 getMOVLMask(NumElems, DAG)); 3025 break; 3026 case 3: 3027 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3028 getUnpacklMask(NumElems, DAG)); 3029 break; 3030 } 3031 } 3032 3033 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3034 // clears the upper bits. 3035 // FIXME: we can do the same for v4f32 case when we know both parts of 3036 // the lower half come from scalar_to_vector (loadf32). We should do 3037 // that in post legalizer dag combiner with target specific hooks. 3038 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3039 return V[0]; 3040 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3041 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3042 SmallVector<SDOperand, 8> MaskVec; 3043 bool Reverse = (NonZeros & 0x3) == 2; 3044 for (unsigned i = 0; i < 2; ++i) 3045 if (Reverse) 3046 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3047 else 3048 MaskVec.push_back(DAG.getConstant(i, EVT)); 3049 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3050 for (unsigned i = 0; i < 2; ++i) 3051 if (Reverse) 3052 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3053 else 3054 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3055 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3056 &MaskVec[0], MaskVec.size()); 3057 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3058 } 3059 3060 if (Values.size() > 2) { 3061 // Expand into a number of unpckl*. 3062 // e.g. for v4f32 3063 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3064 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3065 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3066 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3067 for (unsigned i = 0; i < NumElems; ++i) 3068 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3069 NumElems >>= 1; 3070 while (NumElems != 0) { 3071 for (unsigned i = 0; i < NumElems; ++i) 3072 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3073 UnpckMask); 3074 NumElems >>= 1; 3075 } 3076 return V[0]; 3077 } 3078 3079 return SDOperand(); 3080} 3081 3082static 3083SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3084 SDOperand PermMask, SelectionDAG &DAG, 3085 TargetLowering &TLI) { 3086 SDOperand NewV; 3087 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3088 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3089 MVT::ValueType PtrVT = TLI.getPointerTy(); 3090 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3091 PermMask.Val->op_end()); 3092 3093 // First record which half of which vector the low elements come from. 
3094 SmallVector<unsigned, 4> LowQuad(4); 3095 for (unsigned i = 0; i < 4; ++i) { 3096 SDOperand Elt = MaskElts[i]; 3097 if (Elt.getOpcode() == ISD::UNDEF) 3098 continue; 3099 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3100 int QuadIdx = EltIdx / 4; 3101 ++LowQuad[QuadIdx]; 3102 } 3103 int BestLowQuad = -1; 3104 unsigned MaxQuad = 1; 3105 for (unsigned i = 0; i < 4; ++i) { 3106 if (LowQuad[i] > MaxQuad) { 3107 BestLowQuad = i; 3108 MaxQuad = LowQuad[i]; 3109 } 3110 } 3111 3112 // Record which half of which vector the high elements come from. 3113 SmallVector<unsigned, 4> HighQuad(4); 3114 for (unsigned i = 4; i < 8; ++i) { 3115 SDOperand Elt = MaskElts[i]; 3116 if (Elt.getOpcode() == ISD::UNDEF) 3117 continue; 3118 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3119 int QuadIdx = EltIdx / 4; 3120 ++HighQuad[QuadIdx]; 3121 } 3122 int BestHighQuad = -1; 3123 MaxQuad = 1; 3124 for (unsigned i = 0; i < 4; ++i) { 3125 if (HighQuad[i] > MaxQuad) { 3126 BestHighQuad = i; 3127 MaxQuad = HighQuad[i]; 3128 } 3129 } 3130 3131 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3132 if (BestLowQuad != -1 || BestHighQuad != -1) { 3133 // First sort the 4 chunks in order using shufpd. 3134 SmallVector<SDOperand, 8> MaskVec; 3135 if (BestLowQuad != -1) 3136 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3137 else 3138 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3139 if (BestHighQuad != -1) 3140 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3141 else 3142 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3143 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3144 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3145 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3146 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3147 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3148 3149 // Now sort high and low parts separately. 3150 BitVector InOrder(8); 3151 if (BestLowQuad != -1) { 3152 // Sort lower half in order using PSHUFLW. 3153 MaskVec.clear(); 3154 bool AnyOutOrder = false; 3155 for (unsigned i = 0; i != 4; ++i) { 3156 SDOperand Elt = MaskElts[i]; 3157 if (Elt.getOpcode() == ISD::UNDEF) { 3158 MaskVec.push_back(Elt); 3159 InOrder.set(i); 3160 } else { 3161 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3162 if (EltIdx != i) 3163 AnyOutOrder = true; 3164 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3165 // If this element is in the right place after this shuffle, then 3166 // remember it. 3167 if ((int)(EltIdx / 4) == BestLowQuad) 3168 InOrder.set(i); 3169 } 3170 } 3171 if (AnyOutOrder) { 3172 for (unsigned i = 4; i != 8; ++i) 3173 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3174 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3175 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3176 } 3177 } 3178 3179 if (BestHighQuad != -1) { 3180 // Sort high half in order using PSHUFHW if possible. 
3181 MaskVec.clear(); 3182 for (unsigned i = 0; i != 4; ++i) 3183 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3184 bool AnyOutOrder = false; 3185 for (unsigned i = 4; i != 8; ++i) { 3186 SDOperand Elt = MaskElts[i]; 3187 if (Elt.getOpcode() == ISD::UNDEF) { 3188 MaskVec.push_back(Elt); 3189 InOrder.set(i); 3190 } else { 3191 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3192 if (EltIdx != i) 3193 AnyOutOrder = true; 3194 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3195 // If this element is in the right place after this shuffle, then 3196 // remember it. 3197 if ((int)(EltIdx / 4) == BestHighQuad) 3198 InOrder.set(i); 3199 } 3200 } 3201 if (AnyOutOrder) { 3202 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3203 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3204 } 3205 } 3206 3207 // The other elements are put in the right place using pextrw and pinsrw. 3208 for (unsigned i = 0; i != 8; ++i) { 3209 if (InOrder[i]) 3210 continue; 3211 SDOperand Elt = MaskElts[i]; 3212 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3213 if (EltIdx == i) 3214 continue; 3215 SDOperand ExtOp = (EltIdx < 8) 3216 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3217 DAG.getConstant(EltIdx, PtrVT)) 3218 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3219 DAG.getConstant(EltIdx - 8, PtrVT)); 3220 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3221 DAG.getConstant(i, PtrVT)); 3222 } 3223 return NewV; 3224 } 3225 3226 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3227 // as few as possible. 3228 // First, let's find out how many elements are already in the right order. 3229 unsigned V1InOrder = 0; 3230 unsigned V1FromV1 = 0; 3231 unsigned V2InOrder = 0; 3232 unsigned V2FromV2 = 0; 3233 SmallVector<SDOperand, 8> V1Elts; 3234 SmallVector<SDOperand, 8> V2Elts; 3235 for (unsigned i = 0; i < 8; ++i) { 3236 SDOperand Elt = MaskElts[i]; 3237 if (Elt.getOpcode() == ISD::UNDEF) { 3238 V1Elts.push_back(Elt); 3239 V2Elts.push_back(Elt); 3240 ++V1InOrder; 3241 ++V2InOrder; 3242 continue; 3243 } 3244 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3245 if (EltIdx == i) { 3246 V1Elts.push_back(Elt); 3247 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3248 ++V1InOrder; 3249 } else if (EltIdx == i+8) { 3250 V1Elts.push_back(Elt); 3251 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3252 ++V2InOrder; 3253 } else if (EltIdx < 8) { 3254 V1Elts.push_back(Elt); 3255 ++V1FromV1; 3256 } else { 3257 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3258 ++V2FromV2; 3259 } 3260 } 3261 3262 if (V2InOrder > V1InOrder) { 3263 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3264 std::swap(V1, V2); 3265 std::swap(V1Elts, V2Elts); 3266 std::swap(V1FromV1, V2FromV2); 3267 } 3268 3269 if ((V1FromV1 + V1InOrder) != 8) { 3270 // Some elements are from V2.
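    // Strategy: use one self-shuffle to move the out-of-place V1 elements
    // into their final lanes, then pextrw/pinsrw the remaining elements out
    // of V2 one at a time.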
3271 if (V1FromV1) { 3272 // If there are elements that are from V1 but out of place, 3273 // then first sort them in place. 3274 SmallVector<SDOperand, 8> MaskVec; 3275 for (unsigned i = 0; i < 8; ++i) { 3276 SDOperand Elt = V1Elts[i]; 3277 if (Elt.getOpcode() == ISD::UNDEF) { 3278 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3279 continue; 3280 } 3281 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3282 if (EltIdx >= 8) 3283 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3284 else 3285 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3286 } 3287 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3288 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3289 } 3290 3291 NewV = V1; 3292 for (unsigned i = 0; i < 8; ++i) { 3293 SDOperand Elt = V1Elts[i]; 3294 if (Elt.getOpcode() == ISD::UNDEF) 3295 continue; 3296 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3297 if (EltIdx < 8) 3298 continue; 3299 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3300 DAG.getConstant(EltIdx - 8, PtrVT)); 3301 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3302 DAG.getConstant(i, PtrVT)); 3303 } 3304 return NewV; 3305 } else { 3306 // All elements are from V1. 3307 NewV = V1; 3308 for (unsigned i = 0; i < 8; ++i) { 3309 SDOperand Elt = V1Elts[i]; 3310 if (Elt.getOpcode() == ISD::UNDEF) 3311 continue; 3312 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3313 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3314 DAG.getConstant(EltIdx, PtrVT)); 3315 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3316 DAG.getConstant(i, PtrVT)); 3317 } 3318 return NewV; 3319 } 3320} 3321 3322/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3323/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 3324/// done when every pair / quad of shuffle mask elements points to elements in 3325/// the right sequence. e.g. 3326/// vector_shuffle <>, <>, < 4, 5, | 10, 11, | 0, 1, | 14, 15> 3327static 3328SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, 3329 MVT::ValueType VT, 3330 SDOperand PermMask, SelectionDAG &DAG, 3331 TargetLowering &TLI) { 3332 unsigned NumElems = PermMask.getNumOperands(); 3333 unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3334 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3335 MVT::ValueType NewVT = MaskVT; 3336 switch (VT) { 3337 case MVT::v4f32: NewVT = MVT::v2f64; break; 3338 case MVT::v4i32: NewVT = MVT::v2i64; break; 3339 case MVT::v8i16: NewVT = MVT::v4i32; break; 3340 case MVT::v16i8: NewVT = MVT::v4i32; break; 3341 default: assert(false && "Unexpected!"); 3342 } 3343 3344 if (NewWidth == 2) 3345 if (MVT::isInteger(VT)) 3346 NewVT = MVT::v2i64; 3347 else 3348 NewVT = MVT::v2f64; 3349 unsigned Scale = NumElems / NewWidth; 3350 SmallVector<SDOperand, 8> MaskVec; 3351 for (unsigned i = 0; i < NumElems; i += Scale) { 3352 unsigned StartIdx = ~0U; 3353 for (unsigned j = 0; j < Scale; ++j) { 3354 SDOperand Elt = PermMask.getOperand(i+j); 3355 if (Elt.getOpcode() == ISD::UNDEF) 3356 continue; 3357 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3358 if (StartIdx == ~0U) 3359 StartIdx = EltIdx - (EltIdx % Scale); 3360 if (EltIdx != StartIdx + j) 3361 return SDOperand(); 3362 } 3363 if (StartIdx == ~0U) 3364 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3365 else 3366 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3367 } 3368 3369 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3370 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3371 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3372 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3373 &MaskVec[0], MaskVec.size())); 3374} 3375 3376SDOperand 3377X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3378 SDOperand V1 = Op.getOperand(0); 3379 SDOperand V2 = Op.getOperand(1); 3380 SDOperand PermMask = Op.getOperand(2); 3381 MVT::ValueType VT = Op.getValueType(); 3382 unsigned NumElems = PermMask.getNumOperands(); 3383 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3384 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3385 bool V1IsSplat = false; 3386 bool V2IsSplat = false; 3387 3388 if (isUndefShuffle(Op.Val)) 3389 return DAG.getNode(ISD::UNDEF, VT); 3390 3391 if (isZeroShuffle(Op.Val)) 3392 return getZeroVector(VT, DAG); 3393 3394 if (isIdentityMask(PermMask.Val)) 3395 return V1; 3396 else if (isIdentityMask(PermMask.Val, true)) 3397 return V2; 3398 3399 if (isSplatMask(PermMask.Val)) { 3400 if (NumElems <= 4) return Op; 3401 // Promote it to a v4i32 splat. 3402 return PromoteSplat(Op, DAG); 3403 } 3404 3405 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3406 // do it! 3407 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3408 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3409 if (NewOp.Val) 3410 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3411 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3412 // FIXME: Figure out a cleaner way to do this. 3413 // Try to make use of movq to zero out the top part. 
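    // After narrowing, a v2i64 shuffle that keeps one input's low element
    // and takes the other element from an all-zeros vector is exactly MOVQ,
    // which zeroes the upper 64 bits.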
3414 if (ISD::isBuildVectorAllZeros(V2.Val)) { 3415 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3416 if (NewOp.Val) { 3417 SDOperand NewV1 = NewOp.getOperand(0); 3418 SDOperand NewV2 = NewOp.getOperand(1); 3419 SDOperand NewMask = NewOp.getOperand(2); 3420 if (isCommutedMOVL(NewMask.Val, true, false)) { 3421 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); 3422 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(), 3423 NewV1, NewV2, getMOVLMask(2, DAG)); 3424 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3425 } 3426 } 3427 } else if (ISD::isBuildVectorAllZeros(V1.Val)) { 3428 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3429 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) 3430 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3431 } 3432 } 3433 3434 if (X86::isMOVLMask(PermMask.Val)) 3435 return (V1IsUndef) ? V2 : Op; 3436 3437 if (X86::isMOVSHDUPMask(PermMask.Val) || 3438 X86::isMOVSLDUPMask(PermMask.Val) || 3439 X86::isMOVHLPSMask(PermMask.Val) || 3440 X86::isMOVHPMask(PermMask.Val) || 3441 X86::isMOVLPMask(PermMask.Val)) 3442 return Op; 3443 3444 if (ShouldXformToMOVHLPS(PermMask.Val) || 3445 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3446 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3447 3448 bool Commuted = false; 3449 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3450 // 1,1,1,1 -> v8i16 though. 3451 V1IsSplat = isSplatVector(V1.Val); 3452 V2IsSplat = isSplatVector(V2.Val); 3453 3454 // Canonicalize the splat or undef, if present, to be on the RHS. 3455 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3456 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3457 std::swap(V1IsSplat, V2IsSplat); 3458 std::swap(V1IsUndef, V2IsUndef); 3459 Commuted = true; 3460 } 3461 3462 // FIXME: Figure out a cleaner way to do this. 3463 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3464 if (V2IsUndef) return V1; 3465 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3466 if (V2IsSplat) { 3467 // V2 is a splat, so the mask may be malformed. That is, it may point 3468 // to any V2 element. The instruction selector won't like this. Get 3469 // a corrected mask and commute to form a proper MOVS{S|D}. 3470 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3471 if (NewMask.Val != PermMask.Val) 3472 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3473 } 3474 return Op; 3475 } 3476 3477 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3478 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3479 X86::isUNPCKLMask(PermMask.Val) || 3480 X86::isUNPCKHMask(PermMask.Val)) 3481 return Op; 3482 3483 if (V2IsSplat) { 3484 // Normalize the mask so all entries that point to V2 point to its first 3485 // element, then try to match unpck{h|l} again. If it matches, return a 3486 // new vector_shuffle with the corrected mask.
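    // e.g. a mask like <0, 6, 1, 7> with a splat V2 normalizes to
    // <0, 4, 1, 5>, which is a proper unpckl of V1 and V2.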
3487 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3488 if (NewMask.Val != PermMask.Val) { 3489 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3490 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3491 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3492 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3493 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3494 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3495 } 3496 } 3497 } 3498 3499 // Normalize the node to match x86 shuffle ops if needed. 3500 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3501 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3502 3503 if (Commuted) { 3504 // Commute it back and try unpck* again. 3505 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3506 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3507 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3508 X86::isUNPCKLMask(PermMask.Val) || 3509 X86::isUNPCKHMask(PermMask.Val)) 3510 return Op; 3511 } 3512 3513 // If VT is integer, try PSHUF* first, then SHUFP*. 3514 if (MVT::isInteger(VT)) { 3515 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3516 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3517 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3518 X86::isPSHUFDMask(PermMask.Val)) || 3519 X86::isPSHUFHWMask(PermMask.Val) || 3520 X86::isPSHUFLWMask(PermMask.Val)) { 3521 if (V2.getOpcode() != ISD::UNDEF) 3522 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3523 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3524 return Op; 3525 } 3526 3527 if (X86::isSHUFPMask(PermMask.Val) && 3528 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3529 return Op; 3530 } else { 3531 // Floating point cases in the other order. 3532 if (X86::isSHUFPMask(PermMask.Val)) 3533 return Op; 3534 if (X86::isPSHUFDMask(PermMask.Val) || 3535 X86::isPSHUFHWMask(PermMask.Val) || 3536 X86::isPSHUFLWMask(PermMask.Val)) { 3537 if (V2.getOpcode() != ISD::UNDEF) 3538 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3539 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3540 return Op; 3541 } 3542 } 3543 3544 // Handle v8i16 specifically since SSE can do word extraction and insertion. 3545 if (VT == MVT::v8i16) { 3546 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3547 if (NewOp.Val) 3548 return NewOp; 3549 } 3550 3551 // Handle all 4 wide cases with a number of shuffles. 3552 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3553 // Don't do this for MMX. 3554 MVT::ValueType MaskVT = PermMask.getValueType(); 3555 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3556 SmallVector<std::pair<int, int>, 8> Locs; 3557 Locs.resize(NumElems); // resize, not just reserve: Locs is indexed below. 3558 SmallVector<SDOperand, 8> Mask1(NumElems, 3559 DAG.getNode(ISD::UNDEF, MaskEVT)); 3560 SmallVector<SDOperand, 8> Mask2(NumElems, 3561 DAG.getNode(ISD::UNDEF, MaskEVT)); 3562 unsigned NumHi = 0; 3563 unsigned NumLo = 0; 3564 // If no more than two elements come from either vector, this can be 3565 // implemented with two shuffles. The first shuffle gathers the elements; 3566 // the second shuffle, which takes the first shuffle as both of its 3567 // vector operands, puts the elements into the right order.
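    // e.g. for mask <0, 4, 1, 5>: the first shuffle gathers <V1[0], V1[1],
    // V2[0], V2[1]> (lows in lanes 0-1, highs in lanes 2-3); the second
    // shuffle then reorders that single result with mask <0, 2, 1, 3>.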
3568 for (unsigned i = 0; i != NumElems; ++i) { 3569 SDOperand Elt = PermMask.getOperand(i); 3570 if (Elt.getOpcode() == ISD::UNDEF) { 3571 Locs[i] = std::make_pair(-1, -1); 3572 } else { 3573 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3574 if (Val < NumElems) { 3575 Locs[i] = std::make_pair(0, NumLo); 3576 Mask1[NumLo] = Elt; 3577 NumLo++; 3578 } else { 3579 Locs[i] = std::make_pair(1, NumHi); 3580 if (2+NumHi < NumElems) 3581 Mask1[2+NumHi] = Elt; 3582 NumHi++; 3583 } 3584 } 3585 } 3586 if (NumLo <= 2 && NumHi <= 2) { 3587 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3588 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3589 &Mask1[0], Mask1.size())); 3590 for (unsigned i = 0; i != NumElems; ++i) { 3591 if (Locs[i].first == -1) 3592 continue; 3593 else { 3594 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3595 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3596 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3597 } 3598 } 3599 3600 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3601 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3602 &Mask2[0], Mask2.size())); 3603 } 3604 3605 // Break it into (shuffle shuffle_hi, shuffle_lo). 3606 Locs.clear(); Locs.resize(NumElems); 3607 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3608 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3609 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3610 unsigned MaskIdx = 0; 3611 unsigned LoIdx = 0; 3612 unsigned HiIdx = NumElems/2; 3613 for (unsigned i = 0; i != NumElems; ++i) { 3614 if (i == NumElems/2) { 3615 MaskPtr = &HiMask; 3616 MaskIdx = 1; 3617 LoIdx = 0; 3618 HiIdx = NumElems/2; 3619 } 3620 SDOperand Elt = PermMask.getOperand(i); 3621 if (Elt.getOpcode() == ISD::UNDEF) { 3622 Locs[i] = std::make_pair(-1, -1); 3623 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3624 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3625 (*MaskPtr)[LoIdx] = Elt; 3626 LoIdx++; 3627 } else { 3628 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3629 (*MaskPtr)[HiIdx] = Elt; 3630 HiIdx++; 3631 } 3632 } 3633 3634 SDOperand LoShuffle = 3635 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3636 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3637 &LoMask[0], LoMask.size())); 3638 SDOperand HiShuffle = 3639 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3640 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3641 &HiMask[0], HiMask.size())); 3642 SmallVector<SDOperand, 8> MaskOps; 3643 for (unsigned i = 0; i != NumElems; ++i) { 3644 if (Locs[i].first == -1) { 3645 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3646 } else { 3647 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3648 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3649 } 3650 } 3651 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3652 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3653 &MaskOps[0], MaskOps.size())); 3654 } 3655 3656 return SDOperand(); 3657} 3658 3659SDOperand 3660X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3661 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3662 return SDOperand(); 3663 3664 MVT::ValueType VT = Op.getValueType(); 3665 // TODO: handle v16i8. 3666 if (MVT::getSizeInBits(VT) == 16) { 3667 SDOperand Vec = Op.getOperand(0); 3668 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3669 if (Idx == 0) 3670 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3671 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3672 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3673 Op.getOperand(1))); 3674 // Transform it so it matches pextrw, which produces a 32-bit result.
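    // pextrw zero-extends the selected word into a 32-bit register, so
    // extract in the next wider integer type and truncate; the AssertZext
    // records that the high bits are already zero. (VT+1 below relies on
    // the MVT enum ordering, where i32 immediately follows i16.)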
3675 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3676 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3677 Op.getOperand(0), Op.getOperand(1)); 3678 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3679 DAG.getValueType(VT)); 3680 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3681 } else if (MVT::getSizeInBits(VT) == 32) { 3682 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3683 if (Idx == 0) 3684 return Op; 3685 // SHUFPS the element to the lowest double word, then movss. 3686 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3687 SmallVector<SDOperand, 8> IdxVec; 3688 IdxVec. 3689 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3690 IdxVec. 3691 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3692 IdxVec. 3693 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3694 IdxVec. 3695 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3696 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3697 &IdxVec[0], IdxVec.size()); 3698 SDOperand Vec = Op.getOperand(0); 3699 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3700 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3701 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3702 DAG.getIntPtrConstant(0)); 3703 } else if (MVT::getSizeInBits(VT) == 64) { 3704 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3705 if (Idx == 0) 3706 return Op; 3707 3708 // UNPCKHPD the element to the lowest double word, then movsd. 3709 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3710 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3711 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); 3712 SmallVector<SDOperand, 8> IdxVec; 3713 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3714 IdxVec. 3715 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3716 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3717 &IdxVec[0], IdxVec.size()); 3718 SDOperand Vec = Op.getOperand(0); 3719 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3720 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3721 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3722 DAG.getIntPtrConstant(0)); 3723 } 3724 3725 return SDOperand(); 3726} 3727 3728SDOperand 3729X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3730 MVT::ValueType VT = Op.getValueType(); 3731 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3732 if (EVT == MVT::i8) 3733 return SDOperand(); 3734 3735 SDOperand N0 = Op.getOperand(0); 3736 SDOperand N1 = Op.getOperand(1); 3737 SDOperand N2 = Op.getOperand(2); 3738 3739 if (MVT::getSizeInBits(EVT) == 16) { 3740 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32 3741 // as its second argument.
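    // e.g. (insert_vector_elt v8i16 V, i16 X, 3) becomes "pinsrw $3, %eax,
    // %xmm0" with X any-extended into the 32-bit register (the registers
    // here are only illustrative).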
3742 if (N1.getValueType() != MVT::i32) 3743 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3744 if (N2.getValueType() != MVT::i32) 3745 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); 3746 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3747 } 3748 return SDOperand(); 3749} 3750 3751SDOperand 3752X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3753 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3754 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3755} 3756 3757// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3758// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is 3759// one of the above-mentioned nodes. It has to be wrapped because otherwise 3760// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3761// be used to form addressing modes. These wrapped nodes will be selected 3762// into MOV32ri. 3763SDOperand 3764X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3765 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3766 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3767 getPointerTy(), 3768 CP->getAlignment()); 3769 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3770 // With PIC, the address is actually $g + Offset. 3771 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3772 !Subtarget->isPICStyleRIPRel()) { 3773 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3774 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3775 Result); 3776 } 3777 3778 return Result; 3779} 3780 3781SDOperand 3782X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3783 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3784 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3785 // If it's a debug information descriptor, don't mess with it. 3786 if (DAG.isVerifiedDebugInfoDesc(Op)) 3787 return Result; 3788 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3789 // With PIC, the address is actually $g + Offset. 3790 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3791 !Subtarget->isPICStyleRIPRel()) { 3792 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3793 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3794 Result); 3795 } 3796 3797 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3798 // load the value at address GV, not the value of GV itself. This means that 3799 // the GlobalAddress must be in the base or index register of the address, not 3800 // the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 3801 // The same applies for external symbols during PIC codegen 3802 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3803 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, 3804 &PseudoSourceValue::getGOT(), 0); 3805 3806 return Result; 3807} 3808 3809// Lower ISD::GlobalTLSAddress using the "general dynamic" model 3810static SDOperand 3811LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3812 const MVT::ValueType PtrVT) { 3813 SDOperand InFlag; 3814 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 3815 DAG.getNode(X86ISD::GlobalBaseReg, 3816 PtrVT), InFlag); 3817 InFlag = Chain.getValue(1); 3818 3819 // emit leal symbol@TLSGD(,%ebx,1), %eax 3820 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3821 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3822 GA->getValueType(0), 3823 GA->getOffset()); 3824 SDOperand Ops[] = { Chain, TGA, InFlag }; 3825 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3826 InFlag = Result.getValue(2); 3827 Chain = Result.getValue(1); 3828 3829 // call ___tls_get_addr. This function receives its argument in 3830 // the register EAX. 3831 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3832 InFlag = Chain.getValue(1); 3833 3834 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3835 SDOperand Ops1[] = { Chain, 3836 DAG.getTargetExternalSymbol("___tls_get_addr", 3837 PtrVT), 3838 DAG.getRegister(X86::EAX, PtrVT), 3839 DAG.getRegister(X86::EBX, PtrVT), 3840 InFlag }; 3841 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3842 InFlag = Chain.getValue(1); 3843 3844 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3845} 3846 3847// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 3848// "local exec" model. 3849static SDOperand 3850LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3851 const MVT::ValueType PtrVT) { 3852 // Get the Thread Pointer 3853 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 3854 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 3855 // exec) 3856 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3857 GA->getValueType(0), 3858 GA->getOffset()); 3859 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 3860 3861 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 3862 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, 3863 &PseudoSourceValue::getGOT(), 0); 3864 3865 // The address of the thread local variable is the add of the thread 3866 // pointer with the offset of the variable. 
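  // i.e. address = thread pointer (%gs:0 on x86-32 ELF) plus x@ntpoff for
  // local exec, or plus the value loaded from x@indntpoff for initial exec.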
3867 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 3868} 3869 3870SDOperand 3871X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 3872 // TODO: implement the "local dynamic" model 3873 // TODO: implement the "initial exec" model for PIC executables 3874 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 3875 "TLS not implemented for non-ELF and 64-bit targets"); 3876 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3877 // If the relocation model is PIC, use the "General Dynamic" TLS model, 3878 // otherwise use the "Local Exec" TLS model. 3879 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 3880 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 3881 else 3882 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 3883} 3884 3885SDOperand 3886X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3887 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3888 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3889 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3890 // With PIC, the address is actually $g + Offset. 3891 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3892 !Subtarget->isPICStyleRIPRel()) { 3893 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3894 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3895 Result); 3896 } 3897 3898 return Result; 3899} 3900 3901SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 3902 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3903 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 3904 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3905 // With PIC, the address is actually $g + Offset. 3906 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3907 !Subtarget->isPICStyleRIPRel()) { 3908 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3909 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3910 Result); 3911 } 3912 3913 return Result; 3914} 3915 3916/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 3917/// take a 2 x i32 value to shift plus a shift amount. 3918SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3919 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3920 "Not an i64 shift!"); 3921 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3922 SDOperand ShOpLo = Op.getOperand(0); 3923 SDOperand ShOpHi = Op.getOperand(1); 3924 SDOperand ShAmt = Op.getOperand(2); 3925 SDOperand Tmp1 = isSRA ? 3926 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3927 DAG.getConstant(0, MVT::i32); 3928 3929 SDOperand Tmp2, Tmp3; 3930 if (Op.getOpcode() == ISD::SHL_PARTS) { 3931 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3932 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3933 } else { 3934 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3935 Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3936 } 3937 3938 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3939 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3940 DAG.getConstant(32, MVT::i8)); 3941 SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, 3942 AndNode, DAG.getConstant(0, MVT::i8)); 3943 3944 SDOperand Hi, Lo; 3945 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3946 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3947 SmallVector<SDOperand, 4> Ops; 3948 if (Op.getOpcode() == ISD::SHL_PARTS) { 3949 Ops.push_back(Tmp2); 3950 Ops.push_back(Tmp3); 3951 Ops.push_back(CC); 3952 Ops.push_back(Cond); 3953 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3954 3955 Ops.clear(); 3956 Ops.push_back(Tmp3); 3957 Ops.push_back(Tmp1); 3958 Ops.push_back(CC); 3959 Ops.push_back(Cond); 3960 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3961 } else { 3962 Ops.push_back(Tmp2); 3963 Ops.push_back(Tmp3); 3964 Ops.push_back(CC); 3965 Ops.push_back(Cond); 3966 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3967 3968 Ops.clear(); 3969 Ops.push_back(Tmp3); 3970 Ops.push_back(Tmp1); 3971 Ops.push_back(CC); 3972 Ops.push_back(Cond); 3973 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 3974 } 3975 3976 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3977 Ops.clear(); 3978 Ops.push_back(Lo); 3979 Ops.push_back(Hi); 3980 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3981} 3982 3983SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3984 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3985 Op.getOperand(0).getValueType() >= MVT::i16 && 3986 "Unknown SINT_TO_FP to lower!"); 3987 3988 SDOperand Result; 3989 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3990 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3991 MachineFunction &MF = DAG.getMachineFunction(); 3992 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3993 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3994 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3995 StackSlot, 3996 &PseudoSourceValue::getFixedStack(), 3997 SSFI); 3998 3999 // These are really Legal; caller falls through into that case. 4000 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4001 return Result; 4002 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4003 Subtarget->is64Bit()) 4004 return Result; 4005 4006 // Build the FILD 4007 SDVTList Tys; 4008 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4009 if (useSSE) 4010 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4011 else 4012 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4013 SmallVector<SDOperand, 8> Ops; 4014 Ops.push_back(Chain); 4015 Ops.push_back(StackSlot); 4016 Ops.push_back(DAG.getValueType(SrcVT)); 4017 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 4018 Tys, &Ops[0], Ops.size()); 4019 4020 if (useSSE) { 4021 Chain = Result.getValue(1); 4022 SDOperand InFlag = Result.getValue(2); 4023 4024 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4025 // shouldn't be necessary except that RFP cannot be live across 4026 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
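    // Spill the x87 FILD result to a fresh stack slot with FST, then reload
    // it directly into an SSE register; the store/load pair moves the value
    // from the FP stack to an XMM register.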
4027 MachineFunction &MF = DAG.getMachineFunction(); 4028 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4029 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4030 Tys = DAG.getVTList(MVT::Other); 4031 SmallVector<SDOperand, 8> Ops; 4032 Ops.push_back(Chain); 4033 Ops.push_back(Result); 4034 Ops.push_back(StackSlot); 4035 Ops.push_back(DAG.getValueType(Op.getValueType())); 4036 Ops.push_back(InFlag); 4037 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4038 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4039 &PseudoSourceValue::getFixedStack(), SSFI); 4040 } 4041 4042 return Result; 4043} 4044 4045std::pair<SDOperand,SDOperand> X86TargetLowering:: 4046FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4047 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4048 "Unknown FP_TO_SINT to lower!"); 4049 4050 // These are really Legal. 4051 if (Op.getValueType() == MVT::i32 && 4052 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4053 return std::make_pair(SDOperand(), SDOperand()); 4054 if (Subtarget->is64Bit() && 4055 Op.getValueType() == MVT::i64 && 4056 Op.getOperand(0).getValueType() != MVT::f80) 4057 return std::make_pair(SDOperand(), SDOperand()); 4058 4059 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4060 // stack slot. 4061 MachineFunction &MF = DAG.getMachineFunction(); 4062 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4063 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4064 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4065 unsigned Opc; 4066 switch (Op.getValueType()) { 4067 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4068 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4069 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4070 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4071 } 4072 4073 SDOperand Chain = DAG.getEntryNode(); 4074 SDOperand Value = Op.getOperand(0); 4075 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4076 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4077 Chain = DAG.getStore(Chain, Value, StackSlot, 4078 &PseudoSourceValue::getFixedStack(), SSFI); 4079 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4080 SDOperand Ops[] = { 4081 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4082 }; 4083 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4084 Chain = Value.getValue(1); 4085 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4086 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4087 } 4088 4089 // Build the FP_TO_INT*_IN_MEM 4090 SDOperand Ops[] = { Chain, Value, StackSlot }; 4091 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4092 4093 return std::make_pair(FIST, StackSlot); 4094} 4095 4096SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4097 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4098 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4099 if (FIST.Val == 0) return SDOperand(); 4100 4101 // Load the result. 4102 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4103} 4104 4105SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4106 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4107 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4108 if (FIST.Val == 0) return 0; 4109 4110 // Return an i64 load from the stack slot. 
4111 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4112 4113 // Use a MERGE_VALUES node to drop the chain result value. 4114 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4115} 4116 4117SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4118 MVT::ValueType VT = Op.getValueType(); 4119 MVT::ValueType EltVT = VT; 4120 if (MVT::isVector(VT)) 4121 EltVT = MVT::getVectorElementType(VT); 4122 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4123 std::vector<Constant*> CV; 4124 if (EltVT == MVT::f64) { 4125 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4126 CV.push_back(C); 4127 CV.push_back(C); 4128 } else { 4129 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4130 CV.push_back(C); 4131 CV.push_back(C); 4132 CV.push_back(C); 4133 CV.push_back(C); 4134 } 4135 Constant *C = ConstantVector::get(CV); 4136 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4137 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4138 &PseudoSourceValue::getConstantPool(), 0, 4139 false, 16); 4140 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4141} 4142 4143SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4144 MVT::ValueType VT = Op.getValueType(); 4145 MVT::ValueType EltVT = VT; 4146 unsigned EltNum = 1; 4147 if (MVT::isVector(VT)) { 4148 EltVT = MVT::getVectorElementType(VT); 4149 EltNum = MVT::getVectorNumElements(VT); 4150 } 4151 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4152 std::vector<Constant*> CV; 4153 if (EltVT == MVT::f64) { 4154 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4155 CV.push_back(C); 4156 CV.push_back(C); 4157 } else { 4158 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4159 CV.push_back(C); 4160 CV.push_back(C); 4161 CV.push_back(C); 4162 CV.push_back(C); 4163 } 4164 Constant *C = ConstantVector::get(CV); 4165 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4166 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4167 &PseudoSourceValue::getConstantPool(), 0, 4168 false, 16); 4169 if (MVT::isVector(VT)) { 4170 return DAG.getNode(ISD::BIT_CONVERT, VT, 4171 DAG.getNode(ISD::XOR, MVT::v2i64, 4172 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4173 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4174 } else { 4175 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4176 } 4177} 4178 4179SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4180 SDOperand Op0 = Op.getOperand(0); 4181 SDOperand Op1 = Op.getOperand(1); 4182 MVT::ValueType VT = Op.getValueType(); 4183 MVT::ValueType SrcVT = Op1.getValueType(); 4184 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4185 4186 // If second operand is smaller, extend it first. 4187 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4188 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4189 SrcVT = VT; 4190 SrcTy = MVT::getTypeForValueType(SrcVT); 4191 } 4192 // And if it is bigger, shrink it first. 4193 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4194 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4195 SrcVT = VT; 4196 SrcTy = MVT::getTypeForValueType(SrcVT); 4197 } 4198 4199 // At this point the operands and the result should have the same 4200 // type, and that won't be f80 since that is not custom lowered. 4201 4202 // First get the sign bit of second operand. 
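  // The constant-pool vector is <sign-bit mask, 0, ...>, so the FAND below
  // leaves only Op1's sign bit (in the low lane) and zeroes everything else.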
4203 std::vector<Constant*> CV; 4204 if (SrcVT == MVT::f64) { 4205 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4206 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4207 } else { 4208 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4209 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4210 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4211 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4212 } 4213 Constant *C = ConstantVector::get(CV); 4214 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4215 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4216 &PseudoSourceValue::getConstantPool(), 0, 4217 false, 16); 4218 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4219 4220 // Shift sign bit right or left if the two operands have different types. 4221 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4222 // Op0 is MVT::f32, Op1 is MVT::f64. 4223 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4224 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4225 DAG.getConstant(32, MVT::i32)); 4226 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4227 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4228 DAG.getIntPtrConstant(0)); 4229 } 4230 4231 // Clear first operand sign bit. 4232 CV.clear(); 4233 if (VT == MVT::f64) { 4234 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4235 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4236 } else { 4237 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4238 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4239 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4240 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4241 } 4242 C = ConstantVector::get(CV); 4243 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4244 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4245 &PseudoSourceValue::getConstantPool(), 0, 4246 false, 16); 4247 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4248 4249 // Or the value with the sign bit. 
4250 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4251} 4252 4253SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4254 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4255 SDOperand Cond; 4256 SDOperand Op0 = Op.getOperand(0); 4257 SDOperand Op1 = Op.getOperand(1); 4258 SDOperand CC = Op.getOperand(2); 4259 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4260 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4261 unsigned X86CC; 4262 4263 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4264 Op0, Op1, DAG)) { 4265 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4266 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4267 DAG.getConstant(X86CC, MVT::i8), Cond); 4268 } 4269 4270 assert(isFP && "Illegal integer SetCC!"); 4271 4272 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4273 switch (SetCCOpcode) { 4274 default: assert(false && "Illegal floating point SetCC!"); 4275 case ISD::SETOEQ: { // !PF & ZF 4276 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4277 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4278 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4279 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4280 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4281 } 4282 case ISD::SETUNE: { // PF | !ZF 4283 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4284 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4285 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4286 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4287 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4288 } 4289 } 4290} 4291 4292 4293SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4294 bool addTest = true; 4295 SDOperand Cond = Op.getOperand(0); 4296 SDOperand CC; 4297 4298 if (Cond.getOpcode() == ISD::SETCC) 4299 Cond = LowerSETCC(Cond, DAG); 4300 4301 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4302 // setting operand in place of the X86ISD::SETCC. 4303 if (Cond.getOpcode() == X86ISD::SETCC) { 4304 CC = Cond.getOperand(0); 4305 4306 SDOperand Cmp = Cond.getOperand(1); 4307 unsigned Opc = Cmp.getOpcode(); 4308 MVT::ValueType VT = Op.getValueType(); 4309 4310 bool IllegalFPCMov = false; 4311 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4312 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4313 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4314 4315 if ((Opc == X86ISD::CMP || 4316 Opc == X86ISD::COMI || 4317 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4318 Cond = Cmp; 4319 addTest = false; 4320 } 4321 } 4322 4323 if (addTest) { 4324 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4325 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4326 } 4327 4328 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4329 MVT::Flag); 4330 SmallVector<SDOperand, 4> Ops; 4331 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4332 // condition is true. 
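  // i.e. the node's first value operand is the SELECT's false value (operand
  // 2) and the second is its true value (operand 1); the CMOV replaces the
  // former with the latter when the condition holds.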
4333 Ops.push_back(Op.getOperand(2)); 4334 Ops.push_back(Op.getOperand(1)); 4335 Ops.push_back(CC); 4336 Ops.push_back(Cond); 4337 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4338} 4339 4340SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4341 bool addTest = true; 4342 SDOperand Chain = Op.getOperand(0); 4343 SDOperand Cond = Op.getOperand(1); 4344 SDOperand Dest = Op.getOperand(2); 4345 SDOperand CC; 4346 4347 if (Cond.getOpcode() == ISD::SETCC) 4348 Cond = LowerSETCC(Cond, DAG); 4349 4350 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4351 // setting operand in place of the X86ISD::SETCC. 4352 if (Cond.getOpcode() == X86ISD::SETCC) { 4353 CC = Cond.getOperand(0); 4354 4355 SDOperand Cmp = Cond.getOperand(1); 4356 unsigned Opc = Cmp.getOpcode(); 4357 if (Opc == X86ISD::CMP || 4358 Opc == X86ISD::COMI || 4359 Opc == X86ISD::UCOMI) { 4360 Cond = Cmp; 4361 addTest = false; 4362 } 4363 } 4364 4365 if (addTest) { 4366 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4367 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4368 } 4369 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4370 Chain, Op.getOperand(2), CC, Cond); 4371} 4372 4373 4374// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4375// Calls to _alloca are needed to probe the stack when allocating more than 4K 4376// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4377// that the guard pages used by the OS virtual memory manager are allocated in 4378// correct sequence. 4379SDOperand 4380X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4381 SelectionDAG &DAG) { 4382 assert(Subtarget->isTargetCygMing() && 4383 "This should be used only on Cygwin/Mingw targets"); 4384 4385 // Get the inputs. 4386 SDOperand Chain = Op.getOperand(0); 4387 SDOperand Size = Op.getOperand(1); 4388 // FIXME: Ensure alignment here 4389 4390 SDOperand Flag; 4391 4392 MVT::ValueType IntPtr = getPointerTy(); 4393 MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 4394 4395 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4396 Flag = Chain.getValue(1); 4397 4398 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4399 SDOperand Ops[] = { Chain, 4400 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4401 DAG.getRegister(X86::EAX, IntPtr), 4402 Flag }; 4403 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4404 Flag = Chain.getValue(1); 4405 4406 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4407 4408 std::vector<MVT::ValueType> Tys; 4409 Tys.push_back(SPTy); 4410 Tys.push_back(MVT::Other); 4411 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4412 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4413} 4414 4415SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4416 SDOperand InFlag(0, 0); 4417 SDOperand Chain = Op.getOperand(0); 4418 unsigned Align = 4419 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4420 if (Align == 0) Align = 1; 4421 4422 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4423 // If not DWORD aligned or size is more than the threshold, call memset. 4424 // The libc version is likely to be faster for these cases. It can use the 4425 // address value and run-time information about the CPU.
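  // The rep;stos expansion below is used when the destination is DWORD
  // aligned and any known constant size does not exceed the subtarget
  // threshold; otherwise the libc call wins.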
4426   if ((Align & 3) != 0 ||
4427       (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
4428     MVT::ValueType IntPtr = getPointerTy();
4429     const Type *IntPtrTy = getTargetData()->getIntPtrType();
4430     TargetLowering::ArgListTy Args;
4431     TargetLowering::ArgListEntry Entry;
4432     Entry.Node = Op.getOperand(1);
4433     Entry.Ty = IntPtrTy;
4434     Args.push_back(Entry);
4435     // Extend the unsigned i8 argument to be an int value for the call.
4436     Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
4437     Entry.Ty = IntPtrTy;
4438     Args.push_back(Entry);
4439     Entry.Node = Op.getOperand(3);
4440     Args.push_back(Entry);
4441     std::pair<SDOperand,SDOperand> CallResult =
4442       LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
4443                   DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
4444     return CallResult.second;
4445   }
4446 
4447   MVT::ValueType AVT;
4448   SDOperand Count;
4449   ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4450   unsigned BytesLeft = 0;
4451   bool TwoRepStos = false;
4452   if (ValC) {
4453     unsigned ValReg;
4454     uint64_t Val = ValC->getValue() & 255;
4455 
4456     // If the value is a constant, then we can potentially use larger sets.
4457     switch (Align & 3) {
4458     case 2:   // WORD aligned
4459       AVT = MVT::i16;
4460       ValReg = X86::AX;
4461       Val = (Val << 8) | Val;
4462       break;
4463     case 0:   // DWORD aligned
4464       AVT = MVT::i32;
4465       ValReg = X86::EAX;
4466       Val = (Val << 8)  | Val;
4467       Val = (Val << 16) | Val;
4468       if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
4469         AVT = MVT::i64;
4470         ValReg = X86::RAX;
4471         Val = (Val << 32) | Val;
4472       }
4473       break;
4474     default:  // Byte aligned
4475       AVT = MVT::i8;
4476       ValReg = X86::AL;
4477       Count = Op.getOperand(3);
4478       break;
4479     }
4480 
4481     if (AVT > MVT::i8) {
4482       if (I) {
4483         unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4484         Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
4485         BytesLeft = I->getValue() % UBytes;
4486       } else {
4487         assert(AVT >= MVT::i32 &&
4488                "Do not use rep;stos if not at least DWORD aligned");
4489         Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4490                             Op.getOperand(3), DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8));  // bytes -> elements
4491         TwoRepStos = true;
4492       }
4493     }
4494 
4495     Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
4496                               InFlag);
4497     InFlag = Chain.getValue(1);
4498   } else {
4499     AVT = MVT::i8;
4500     Count  = Op.getOperand(3);
4501     Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
4502     InFlag = Chain.getValue(1);
4503   }
4504 
4505   Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4506                             Count, InFlag);
4507   InFlag = Chain.getValue(1);
4508   Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4509                             Op.getOperand(1), InFlag);
4510   InFlag = Chain.getValue(1);
4511 
4512   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4513   SmallVector<SDOperand, 8> Ops;
4514   Ops.push_back(Chain);
4515   Ops.push_back(DAG.getValueType(AVT));
4516   Ops.push_back(InFlag);
4517   Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4518 
4519   if (TwoRepStos) {
4520     InFlag = Chain.getValue(1);
4521     Count = Op.getOperand(3);
4522     MVT::ValueType CVT = Count.getValueType();
4523     SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4524                                  DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4525     Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ?
X86::RCX : X86::ECX, 4526 Left, InFlag); 4527 InFlag = Chain.getValue(1); 4528 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4529 Ops.clear(); 4530 Ops.push_back(Chain); 4531 Ops.push_back(DAG.getValueType(MVT::i8)); 4532 Ops.push_back(InFlag); 4533 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4534 } else if (BytesLeft) { 4535 // Issue stores for the last 1 - 7 bytes. 4536 SDOperand Value; 4537 unsigned Val = ValC->getValue() & 255; 4538 unsigned Offset = I->getValue() - BytesLeft; 4539 SDOperand DstAddr = Op.getOperand(1); 4540 MVT::ValueType AddrVT = DstAddr.getValueType(); 4541 if (BytesLeft >= 4) { 4542 Val = (Val << 8) | Val; 4543 Val = (Val << 16) | Val; 4544 Value = DAG.getConstant(Val, MVT::i32); 4545 Chain = DAG.getStore(Chain, Value, 4546 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4547 DAG.getConstant(Offset, AddrVT)), 4548 NULL, 0); 4549 BytesLeft -= 4; 4550 Offset += 4; 4551 } 4552 if (BytesLeft >= 2) { 4553 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4554 Chain = DAG.getStore(Chain, Value, 4555 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4556 DAG.getConstant(Offset, AddrVT)), 4557 NULL, 0); 4558 BytesLeft -= 2; 4559 Offset += 2; 4560 } 4561 if (BytesLeft == 1) { 4562 Value = DAG.getConstant(Val, MVT::i8); 4563 Chain = DAG.getStore(Chain, Value, 4564 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4565 DAG.getConstant(Offset, AddrVT)), 4566 NULL, 0); 4567 } 4568 } 4569 4570 return Chain; 4571} 4572 4573SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4574 SDOperand Dest, 4575 SDOperand Source, 4576 unsigned Size, 4577 unsigned Align, 4578 SelectionDAG &DAG) { 4579 MVT::ValueType AVT; 4580 unsigned BytesLeft = 0; 4581 switch (Align & 3) { 4582 case 2: // WORD aligned 4583 AVT = MVT::i16; 4584 break; 4585 case 0: // DWORD aligned 4586 AVT = MVT::i32; 4587 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4588 AVT = MVT::i64; 4589 break; 4590 default: // Byte aligned 4591 AVT = MVT::i8; 4592 break; 4593 } 4594 4595 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4596 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4597 BytesLeft = Size % UBytes; 4598 4599 SDOperand InFlag(0, 0); 4600 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4601 Count, InFlag); 4602 InFlag = Chain.getValue(1); 4603 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4604 Dest, InFlag); 4605 InFlag = Chain.getValue(1); 4606 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4607 Source, InFlag); 4608 InFlag = Chain.getValue(1); 4609 4610 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4611 SmallVector<SDOperand, 8> Ops; 4612 Ops.push_back(Chain); 4613 Ops.push_back(DAG.getValueType(AVT)); 4614 Ops.push_back(InFlag); 4615 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4616 4617 if (BytesLeft) { 4618 // Issue loads and stores for the last 1 - 7 bytes. 
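    // As an illustration: a DWORD-aligned 15-byte copy runs rep;movs over
    // three 4-byte elements (12 bytes), leaving BytesLeft = 3, which is
    // finished below with one 2-byte and one 1-byte load/store pair.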
4619 unsigned Offset = Size - BytesLeft; 4620 SDOperand DstAddr = Dest; 4621 MVT::ValueType DstVT = DstAddr.getValueType(); 4622 SDOperand SrcAddr = Source; 4623 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4624 SDOperand Value; 4625 if (BytesLeft >= 4) { 4626 Value = DAG.getLoad(MVT::i32, Chain, 4627 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4628 DAG.getConstant(Offset, SrcVT)), 4629 NULL, 0); 4630 Chain = Value.getValue(1); 4631 Chain = DAG.getStore(Chain, Value, 4632 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4633 DAG.getConstant(Offset, DstVT)), 4634 NULL, 0); 4635 BytesLeft -= 4; 4636 Offset += 4; 4637 } 4638 if (BytesLeft >= 2) { 4639 Value = DAG.getLoad(MVT::i16, Chain, 4640 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4641 DAG.getConstant(Offset, SrcVT)), 4642 NULL, 0); 4643 Chain = Value.getValue(1); 4644 Chain = DAG.getStore(Chain, Value, 4645 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4646 DAG.getConstant(Offset, DstVT)), 4647 NULL, 0); 4648 BytesLeft -= 2; 4649 Offset += 2; 4650 } 4651 4652 if (BytesLeft == 1) { 4653 Value = DAG.getLoad(MVT::i8, Chain, 4654 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4655 DAG.getConstant(Offset, SrcVT)), 4656 NULL, 0); 4657 Chain = Value.getValue(1); 4658 Chain = DAG.getStore(Chain, Value, 4659 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4660 DAG.getConstant(Offset, DstVT)), 4661 NULL, 0); 4662 } 4663 } 4664 4665 return Chain; 4666} 4667 4668/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4669SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4670 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4671 SDOperand TheChain = N->getOperand(0); 4672 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4673 if (Subtarget->is64Bit()) { 4674 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4675 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4676 MVT::i64, rax.getValue(2)); 4677 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4678 DAG.getConstant(32, MVT::i8)); 4679 SDOperand Ops[] = { 4680 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4681 }; 4682 4683 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4684 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4685 } 4686 4687 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4688 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4689 MVT::i32, eax.getValue(2)); 4690 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4691 SDOperand Ops[] = { eax, edx }; 4692 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4693 4694 // Use a MERGE_VALUES to return the value and chain. 4695 Ops[1] = edx.getValue(1); 4696 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4697 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4698} 4699 4700SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4701 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4702 4703 if (!Subtarget->is64Bit()) { 4704 // vastart just stores the address of the VarArgsFrameIndex slot into the 4705 // memory location argument. 4706 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4707 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4708 } 4709 4710 // __va_list_tag: 4711 // gp_offset (0 - 6 * 8) 4712 // fp_offset (48 - 48 + 8 * 16) 4713 // overflow_arg_area (point to parameters coming in memory). 
4714 // reg_save_area 4715 SmallVector<SDOperand, 8> MemOps; 4716 SDOperand FIN = Op.getOperand(1); 4717 // Store gp_offset 4718 SDOperand Store = DAG.getStore(Op.getOperand(0), 4719 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4720 FIN, SV, 0); 4721 MemOps.push_back(Store); 4722 4723 // Store fp_offset 4724 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4725 Store = DAG.getStore(Op.getOperand(0), 4726 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4727 FIN, SV, 0); 4728 MemOps.push_back(Store); 4729 4730 // Store ptr to overflow_arg_area 4731 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4732 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4733 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4734 MemOps.push_back(Store); 4735 4736 // Store ptr to reg_save_area. 4737 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4738 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4739 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4740 MemOps.push_back(Store); 4741 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4742} 4743 4744SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4745 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4746 SDOperand Chain = Op.getOperand(0); 4747 SDOperand DstPtr = Op.getOperand(1); 4748 SDOperand SrcPtr = Op.getOperand(2); 4749 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4750 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4751 4752 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 4753 Chain = SrcPtr.getValue(1); 4754 for (unsigned i = 0; i < 3; ++i) { 4755 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 4756 Chain = Val.getValue(1); 4757 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 4758 if (i == 2) 4759 break; 4760 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4761 DAG.getIntPtrConstant(8)); 4762 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4763 DAG.getIntPtrConstant(8)); 4764 } 4765 return Chain; 4766} 4767 4768SDOperand 4769X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4770 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4771 switch (IntNo) { 4772 default: return SDOperand(); // Don't custom lower most intrinsics. 4773 // Comparison intrinsics. 
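  // Each of the comi*/ucomi* intrinsics below lowers to an X86ISD::COMI or
  // X86ISD::UCOMI compare followed by an X86ISD::SETCC on the resulting
  // EFLAGS; translateX86CC picks the concrete condition code (and may
  // canonicalize the operands to obtain one).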
4774 case Intrinsic::x86_sse_comieq_ss: 4775 case Intrinsic::x86_sse_comilt_ss: 4776 case Intrinsic::x86_sse_comile_ss: 4777 case Intrinsic::x86_sse_comigt_ss: 4778 case Intrinsic::x86_sse_comige_ss: 4779 case Intrinsic::x86_sse_comineq_ss: 4780 case Intrinsic::x86_sse_ucomieq_ss: 4781 case Intrinsic::x86_sse_ucomilt_ss: 4782 case Intrinsic::x86_sse_ucomile_ss: 4783 case Intrinsic::x86_sse_ucomigt_ss: 4784 case Intrinsic::x86_sse_ucomige_ss: 4785 case Intrinsic::x86_sse_ucomineq_ss: 4786 case Intrinsic::x86_sse2_comieq_sd: 4787 case Intrinsic::x86_sse2_comilt_sd: 4788 case Intrinsic::x86_sse2_comile_sd: 4789 case Intrinsic::x86_sse2_comigt_sd: 4790 case Intrinsic::x86_sse2_comige_sd: 4791 case Intrinsic::x86_sse2_comineq_sd: 4792 case Intrinsic::x86_sse2_ucomieq_sd: 4793 case Intrinsic::x86_sse2_ucomilt_sd: 4794 case Intrinsic::x86_sse2_ucomile_sd: 4795 case Intrinsic::x86_sse2_ucomigt_sd: 4796 case Intrinsic::x86_sse2_ucomige_sd: 4797 case Intrinsic::x86_sse2_ucomineq_sd: { 4798 unsigned Opc = 0; 4799 ISD::CondCode CC = ISD::SETCC_INVALID; 4800 switch (IntNo) { 4801 default: break; 4802 case Intrinsic::x86_sse_comieq_ss: 4803 case Intrinsic::x86_sse2_comieq_sd: 4804 Opc = X86ISD::COMI; 4805 CC = ISD::SETEQ; 4806 break; 4807 case Intrinsic::x86_sse_comilt_ss: 4808 case Intrinsic::x86_sse2_comilt_sd: 4809 Opc = X86ISD::COMI; 4810 CC = ISD::SETLT; 4811 break; 4812 case Intrinsic::x86_sse_comile_ss: 4813 case Intrinsic::x86_sse2_comile_sd: 4814 Opc = X86ISD::COMI; 4815 CC = ISD::SETLE; 4816 break; 4817 case Intrinsic::x86_sse_comigt_ss: 4818 case Intrinsic::x86_sse2_comigt_sd: 4819 Opc = X86ISD::COMI; 4820 CC = ISD::SETGT; 4821 break; 4822 case Intrinsic::x86_sse_comige_ss: 4823 case Intrinsic::x86_sse2_comige_sd: 4824 Opc = X86ISD::COMI; 4825 CC = ISD::SETGE; 4826 break; 4827 case Intrinsic::x86_sse_comineq_ss: 4828 case Intrinsic::x86_sse2_comineq_sd: 4829 Opc = X86ISD::COMI; 4830 CC = ISD::SETNE; 4831 break; 4832 case Intrinsic::x86_sse_ucomieq_ss: 4833 case Intrinsic::x86_sse2_ucomieq_sd: 4834 Opc = X86ISD::UCOMI; 4835 CC = ISD::SETEQ; 4836 break; 4837 case Intrinsic::x86_sse_ucomilt_ss: 4838 case Intrinsic::x86_sse2_ucomilt_sd: 4839 Opc = X86ISD::UCOMI; 4840 CC = ISD::SETLT; 4841 break; 4842 case Intrinsic::x86_sse_ucomile_ss: 4843 case Intrinsic::x86_sse2_ucomile_sd: 4844 Opc = X86ISD::UCOMI; 4845 CC = ISD::SETLE; 4846 break; 4847 case Intrinsic::x86_sse_ucomigt_ss: 4848 case Intrinsic::x86_sse2_ucomigt_sd: 4849 Opc = X86ISD::UCOMI; 4850 CC = ISD::SETGT; 4851 break; 4852 case Intrinsic::x86_sse_ucomige_ss: 4853 case Intrinsic::x86_sse2_ucomige_sd: 4854 Opc = X86ISD::UCOMI; 4855 CC = ISD::SETGE; 4856 break; 4857 case Intrinsic::x86_sse_ucomineq_ss: 4858 case Intrinsic::x86_sse2_ucomineq_sd: 4859 Opc = X86ISD::UCOMI; 4860 CC = ISD::SETNE; 4861 break; 4862 } 4863 4864 unsigned X86CC; 4865 SDOperand LHS = Op.getOperand(1); 4866 SDOperand RHS = Op.getOperand(2); 4867 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4868 4869 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 4870 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 4871 DAG.getConstant(X86CC, MVT::i8), Cond); 4872 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4873 } 4874 } 4875} 4876 4877SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4878 // Depths > 0 not supported yet! 
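  // (Only depth 0, i.e. __builtin_return_address(0), is handled; walking
  // further up the stack would require following saved frame pointers.)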
4879 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4880 return SDOperand(); 4881 4882 // Just load the return address 4883 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4884 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4885} 4886 4887SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4888 // Depths > 0 not supported yet! 4889 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4890 return SDOperand(); 4891 4892 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4893 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4894 DAG.getIntPtrConstant(4)); 4895} 4896 4897SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 4898 SelectionDAG &DAG) { 4899 // Is not yet supported on x86-64 4900 if (Subtarget->is64Bit()) 4901 return SDOperand(); 4902 4903 return DAG.getIntPtrConstant(8); 4904} 4905 4906SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 4907{ 4908 assert(!Subtarget->is64Bit() && 4909 "Lowering of eh_return builtin is not supported yet on x86-64"); 4910 4911 MachineFunction &MF = DAG.getMachineFunction(); 4912 SDOperand Chain = Op.getOperand(0); 4913 SDOperand Offset = Op.getOperand(1); 4914 SDOperand Handler = Op.getOperand(2); 4915 4916 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 4917 getPointerTy()); 4918 4919 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 4920 DAG.getIntPtrConstant(-4UL)); 4921 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 4922 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 4923 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 4924 MF.getRegInfo().addLiveOut(X86::ECX); 4925 4926 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 4927 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 4928} 4929 4930SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 4931 SelectionDAG &DAG) { 4932 SDOperand Root = Op.getOperand(0); 4933 SDOperand Trmp = Op.getOperand(1); // trampoline 4934 SDOperand FPtr = Op.getOperand(2); // nested function 4935 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 4936 4937 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4938 4939 const X86InstrInfo *TII = 4940 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 4941 4942 if (Subtarget->is64Bit()) { 4943 SDOperand OutChains[6]; 4944 4945 // Large code-model. 4946 4947 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 4948 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 4949 4950 const unsigned char N86R10 = 4951 ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 4952 const unsigned char N86R11 = 4953 ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 4954 4955 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 4956 4957 // Load the pointer to the nested function into R11. 4958 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 4959 SDOperand Addr = Trmp; 4960 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4961 TrmpAddr, 0); 4962 4963 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 4964 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 4965 4966 // Load the 'nest' parameter value into R10. 
4967 // R10 is specified in X86CallingConv.td 4968 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 4969 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 4970 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4971 TrmpAddr, 10); 4972 4973 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 4974 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 4975 4976 // Jump to the nested function. 4977 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 4978 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 4979 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 4980 TrmpAddr, 20); 4981 4982 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 4983 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 4984 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 4985 TrmpAddr, 22); 4986 4987 SDOperand Ops[] = 4988 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 4989 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 4990 } else { 4991 const Function *Func = 4992 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 4993 unsigned CC = Func->getCallingConv(); 4994 unsigned NestReg; 4995 4996 switch (CC) { 4997 default: 4998 assert(0 && "Unsupported calling convention"); 4999 case CallingConv::C: 5000 case CallingConv::X86_StdCall: { 5001 // Pass 'nest' parameter in ECX. 5002 // Must be kept in sync with X86CallingConv.td 5003 NestReg = X86::ECX; 5004 5005 // Check that ECX wasn't needed by an 'inreg' parameter. 5006 const FunctionType *FTy = Func->getFunctionType(); 5007 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5008 5009 if (Attrs && !Func->isVarArg()) { 5010 unsigned InRegCount = 0; 5011 unsigned Idx = 1; 5012 5013 for (FunctionType::param_iterator I = FTy->param_begin(), 5014 E = FTy->param_end(); I != E; ++I, ++Idx) 5015 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5016 // FIXME: should only count parameters that are lowered to integers. 5017 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5018 5019 if (InRegCount > 2) { 5020 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5021 abort(); 5022 } 5023 } 5024 break; 5025 } 5026 case CallingConv::X86_FastCall: 5027 // Pass 'nest' parameter in EAX. 
5028     // Must be kept in sync with X86CallingConv.td
5029     NestReg = X86::EAX;
5030     break;
5031   }
5032 
5033   SDOperand OutChains[4];
5034   SDOperand Addr, Disp;
5035 
5036   Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
5037   Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
5038 
5039   const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
5040   const unsigned char N86Reg =
5041     ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
5042   OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
5043                               Trmp, TrmpAddr, 0);
5044 
5045   Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
5046   OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);
5047 
5048   const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
5049   Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
5050   OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
5051                               TrmpAddr, 5, false, 1);
5052 
5053   Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
5054   OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);
5055 
5056   SDOperand Ops[] =
5057     { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
5058   return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
5059   }
5060 }
5061 
5062 SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
5063   /*
5064    The rounding mode is in bits 11:10 of the FP control word (FPCW), and has
5065    the following settings:
5066      00 Round to nearest
5067      01 Round to -inf
5068      10 Round to +inf
5069      11 Round to 0
5070 
5071   FLT_ROUNDS, on the other hand, expects the following:
5072     -1 Undefined
5073      0 Round to 0
5074      1 Round to nearest
5075      2 Round to +inf
5076      3 Round to -inf
5077 
5078   To perform the conversion, we do:
5079     (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
5080   */
5081 
5082   MachineFunction &MF = DAG.getMachineFunction();
5083   const TargetMachine &TM = MF.getTarget();
5084   const TargetFrameInfo &TFI = *TM.getFrameInfo();
5085   unsigned StackAlignment = TFI.getStackAlignment();
5086   MVT::ValueType VT = Op.getValueType();
5087 
5088   // Save FP Control Word to stack slot
5089   int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
5090   SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
5091 
5092   SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
5093                                 DAG.getEntryNode(), StackSlot);
5094 
5095   // Load FP Control Word from stack slot
5096   SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);
5097 
5098   // Transform as necessary
5099   SDOperand CWD1 =
5100     DAG.getNode(ISD::SRL, MVT::i16,
5101                 DAG.getNode(ISD::AND, MVT::i16,
5102                             CWD, DAG.getConstant(0x800, MVT::i16)),
5103                 DAG.getConstant(11, MVT::i8));
5104   SDOperand CWD2 =
5105     DAG.getNode(ISD::SRL, MVT::i16,
5106                 DAG.getNode(ISD::AND, MVT::i16,
5107                             CWD, DAG.getConstant(0x400, MVT::i16)),
5108                 DAG.getConstant(9, MVT::i8));
5109 
5110   SDOperand RetVal =
5111     DAG.getNode(ISD::AND, MVT::i16,
5112                 DAG.getNode(ISD::ADD, MVT::i16,
5113                             DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
5114                             DAG.getConstant(1, MVT::i16)),
5115                 DAG.getConstant(3, MVT::i16));
5116 
5117 
5118   return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
5119 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5120} 5121 5122SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { 5123 MVT::ValueType VT = Op.getValueType(); 5124 MVT::ValueType OpVT = VT; 5125 unsigned NumBits = MVT::getSizeInBits(VT); 5126 5127 Op = Op.getOperand(0); 5128 if (VT == MVT::i8) { 5129 // Zero extend to i32 since there is not an i8 bsr. 5130 OpVT = MVT::i32; 5131 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5132 } 5133 5134 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 5135 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5136 Op = DAG.getNode(X86ISD::BSR, VTs, Op); 5137 5138 // If src is zero (i.e. bsr sets ZF), returns NumBits. 5139 SmallVector<SDOperand, 4> Ops; 5140 Ops.push_back(Op); 5141 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); 5142 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5143 Ops.push_back(Op.getValue(1)); 5144 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5145 5146 // Finally xor with NumBits-1. 5147 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 5148 5149 if (VT == MVT::i8) 5150 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5151 return Op; 5152} 5153 5154SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { 5155 MVT::ValueType VT = Op.getValueType(); 5156 MVT::ValueType OpVT = VT; 5157 unsigned NumBits = MVT::getSizeInBits(VT); 5158 5159 Op = Op.getOperand(0); 5160 if (VT == MVT::i8) { 5161 OpVT = MVT::i32; 5162 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op); 5163 } 5164 5165 // Issue a bsf (scan bits forward) which also sets EFLAGS. 5166 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 5167 Op = DAG.getNode(X86ISD::BSF, VTs, Op); 5168 5169 // If src is zero (i.e. bsf sets ZF), returns NumBits. 5170 SmallVector<SDOperand, 4> Ops; 5171 Ops.push_back(Op); 5172 Ops.push_back(DAG.getConstant(NumBits, OpVT)); 5173 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); 5174 Ops.push_back(Op.getValue(1)); 5175 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4); 5176 5177 if (VT == MVT::i8) 5178 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op); 5179 return Op; 5180} 5181 5182/// LowerOperation - Provide custom lowering hooks for some operations. 
5183/// 5184SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5185 switch (Op.getOpcode()) { 5186 default: assert(0 && "Should not custom lower this!"); 5187 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5188 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5189 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5190 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5191 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5192 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5193 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5194 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5195 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5196 case ISD::SHL_PARTS: 5197 case ISD::SRA_PARTS: 5198 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5199 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5200 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5201 case ISD::FABS: return LowerFABS(Op, DAG); 5202 case ISD::FNEG: return LowerFNEG(Op, DAG); 5203 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5204 case ISD::SETCC: return LowerSETCC(Op, DAG); 5205 case ISD::SELECT: return LowerSELECT(Op, DAG); 5206 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5207 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5208 case ISD::CALL: return LowerCALL(Op, DAG); 5209 case ISD::RET: return LowerRET(Op, DAG); 5210 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5211 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5212 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5213 case ISD::VASTART: return LowerVASTART(Op, DAG); 5214 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5215 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5216 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5217 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5218 case ISD::FRAME_TO_ARGS_OFFSET: 5219 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5220 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5221 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5222 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5223 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5224 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 5225 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 5226 5227 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5228 case ISD::READCYCLECOUNTER: 5229 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5230 } 5231} 5232 5233/// ExpandOperation - Provide custom lowering hooks for expanding operations. 
5234SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5235 switch (N->getOpcode()) { 5236 default: assert(0 && "Should not custom lower this!"); 5237 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5238 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5239 } 5240} 5241 5242const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5243 switch (Opcode) { 5244 default: return NULL; 5245 case X86ISD::BSF: return "X86ISD::BSF"; 5246 case X86ISD::BSR: return "X86ISD::BSR"; 5247 case X86ISD::SHLD: return "X86ISD::SHLD"; 5248 case X86ISD::SHRD: return "X86ISD::SHRD"; 5249 case X86ISD::FAND: return "X86ISD::FAND"; 5250 case X86ISD::FOR: return "X86ISD::FOR"; 5251 case X86ISD::FXOR: return "X86ISD::FXOR"; 5252 case X86ISD::FSRL: return "X86ISD::FSRL"; 5253 case X86ISD::FILD: return "X86ISD::FILD"; 5254 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5255 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5256 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5257 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5258 case X86ISD::FLD: return "X86ISD::FLD"; 5259 case X86ISD::FST: return "X86ISD::FST"; 5260 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 5261 case X86ISD::FP_GET_RESULT2: return "X86ISD::FP_GET_RESULT2"; 5262 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 5263 case X86ISD::CALL: return "X86ISD::CALL"; 5264 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5265 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5266 case X86ISD::CMP: return "X86ISD::CMP"; 5267 case X86ISD::COMI: return "X86ISD::COMI"; 5268 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5269 case X86ISD::SETCC: return "X86ISD::SETCC"; 5270 case X86ISD::CMOV: return "X86ISD::CMOV"; 5271 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5272 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5273 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5274 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5275 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5276 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5277 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 5278 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5279 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5280 case X86ISD::FMAX: return "X86ISD::FMAX"; 5281 case X86ISD::FMIN: return "X86ISD::FMIN"; 5282 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5283 case X86ISD::FRCP: return "X86ISD::FRCP"; 5284 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5285 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5286 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5287 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5288 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5289 } 5290} 5291 5292// isLegalAddressingMode - Return true if the addressing mode represented 5293// by AM is legal for this target, for a load/store of the specified type. 5294bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5295 const Type *Ty) const { 5296 // X86 supports extremely general addressing modes. 5297 5298 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5299 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5300 return false; 5301 5302 if (AM.BaseGV) { 5303 // We can only fold this if we don't need an extra load. 
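    // ("Extra load" means the global's address itself has to be loaded, e.g.
    // through the GOT or an indirection stub in PIC code, so it cannot be
    // folded into an addressing mode.)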
5304 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5305 return false; 5306 5307 // X86-64 only supports addr of globals in small code model. 5308 if (Subtarget->is64Bit()) { 5309 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5310 return false; 5311 // If lower 4G is not available, then we must use rip-relative addressing. 5312 if (AM.BaseOffs || AM.Scale > 1) 5313 return false; 5314 } 5315 } 5316 5317 switch (AM.Scale) { 5318 case 0: 5319 case 1: 5320 case 2: 5321 case 4: 5322 case 8: 5323 // These scales always work. 5324 break; 5325 case 3: 5326 case 5: 5327 case 9: 5328 // These scales are formed with basereg+scalereg. Only accept if there is 5329 // no basereg yet. 5330 if (AM.HasBaseReg) 5331 return false; 5332 break; 5333 default: // Other stuff never works. 5334 return false; 5335 } 5336 5337 return true; 5338} 5339 5340 5341bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5342 if (!Ty1->isInteger() || !Ty2->isInteger()) 5343 return false; 5344 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5345 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5346 if (NumBits1 <= NumBits2) 5347 return false; 5348 return Subtarget->is64Bit() || NumBits1 < 64; 5349} 5350 5351bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5352 MVT::ValueType VT2) const { 5353 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5354 return false; 5355 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5356 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5357 if (NumBits1 <= NumBits2) 5358 return false; 5359 return Subtarget->is64Bit() || NumBits1 < 64; 5360} 5361 5362/// isShuffleMaskLegal - Targets can use this to indicate that they only 5363/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5364/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5365/// are assumed to be legal. 5366bool 5367X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5368 // Only do shuffles on 128-bit vector types for now. 5369 if (MVT::getSizeInBits(VT) == 64) return false; 5370 return (Mask.Val->getNumOperands() <= 4 || 5371 isIdentityMask(Mask.Val) || 5372 isIdentityMask(Mask.Val, true) || 5373 isSplatMask(Mask.Val) || 5374 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5375 X86::isUNPCKLMask(Mask.Val) || 5376 X86::isUNPCKHMask(Mask.Val) || 5377 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5378 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5379} 5380 5381bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5382 MVT::ValueType EVT, 5383 SelectionDAG &DAG) const { 5384 unsigned NumElts = BVOps.size(); 5385 // Only do shuffles on 128-bit vector types for now. 
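  // (64-bit vector types correspond to the MMX register file; their shuffles
  // are rejected below.)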
5386 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5387 if (NumElts == 2) return true; 5388 if (NumElts == 4) { 5389 return (isMOVLMask(&BVOps[0], 4) || 5390 isCommutedMOVL(&BVOps[0], 4, true) || 5391 isSHUFPMask(&BVOps[0], 4) || 5392 isCommutedSHUFP(&BVOps[0], 4)); 5393 } 5394 return false; 5395} 5396 5397//===----------------------------------------------------------------------===// 5398// X86 Scheduler Hooks 5399//===----------------------------------------------------------------------===// 5400 5401MachineBasicBlock * 5402X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5403 MachineBasicBlock *BB) { 5404 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5405 switch (MI->getOpcode()) { 5406 default: assert(false && "Unexpected instr type to insert"); 5407 case X86::CMOV_FR32: 5408 case X86::CMOV_FR64: 5409 case X86::CMOV_V4F32: 5410 case X86::CMOV_V2F64: 5411 case X86::CMOV_V2I64: { 5412 // To "insert" a SELECT_CC instruction, we actually have to insert the 5413 // diamond control-flow pattern. The incoming instruction knows the 5414 // destination vreg to set, the condition code register to branch on, the 5415 // true/false values to select between, and a branch opcode to use. 5416 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5417 ilist<MachineBasicBlock>::iterator It = BB; 5418 ++It; 5419 5420 // thisMBB: 5421 // ... 5422 // TrueVal = ... 5423 // cmpTY ccX, r1, r2 5424 // bCC copy1MBB 5425 // fallthrough --> copy0MBB 5426 MachineBasicBlock *thisMBB = BB; 5427 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5428 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5429 unsigned Opc = 5430 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5431 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5432 MachineFunction *F = BB->getParent(); 5433 F->getBasicBlockList().insert(It, copy0MBB); 5434 F->getBasicBlockList().insert(It, sinkMBB); 5435 // Update machine-CFG edges by first adding all successors of the current 5436 // block to the new block which will contain the Phi node for the select. 5437 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5438 e = BB->succ_end(); i != e; ++i) 5439 sinkMBB->addSuccessor(*i); 5440 // Next, remove all successors of the current block, and add the true 5441 // and fallthrough blocks as its successors. 5442 while(!BB->succ_empty()) 5443 BB->removeSuccessor(BB->succ_begin()); 5444 BB->addSuccessor(copy0MBB); 5445 BB->addSuccessor(sinkMBB); 5446 5447 // copy0MBB: 5448 // %FalseValue = ... 5449 // # fallthrough to sinkMBB 5450 BB = copy0MBB; 5451 5452 // Update machine-CFG edges 5453 BB->addSuccessor(sinkMBB); 5454 5455 // sinkMBB: 5456 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5457 // ... 5458 BB = sinkMBB; 5459 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5460 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5461 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5462 5463 delete MI; // The pseudo instruction is gone now. 5464 return BB; 5465 } 5466 5467 case X86::FP32_TO_INT16_IN_MEM: 5468 case X86::FP32_TO_INT32_IN_MEM: 5469 case X86::FP32_TO_INT64_IN_MEM: 5470 case X86::FP64_TO_INT16_IN_MEM: 5471 case X86::FP64_TO_INT32_IN_MEM: 5472 case X86::FP64_TO_INT64_IN_MEM: 5473 case X86::FP80_TO_INT16_IN_MEM: 5474 case X86::FP80_TO_INT32_IN_MEM: 5475 case X86::FP80_TO_INT64_IN_MEM: { 5476 // Change the floating point control register to use "round towards zero" 5477 // mode when truncating to an integer value. 
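    // In outline, the code below emits:
    //   fnstcw [slot]         ; save the current FP control word
    //   mov    cw, [slot]     ; remember the old value
    //   mov    [slot], 0xC7F  ; RC bits (11:10) = 11b -> round toward zero
    //   fldcw  [slot]         ; install the truncating rounding mode
    //   mov    [slot], cw     ; put the old value back in memory
    //   fistp  [dest]         ; store the truncated integer
    //   fldcw  [slot]         ; restore the original rounding mode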
5478 MachineFunction *F = BB->getParent(); 5479 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5480 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5481 5482 // Load the old value of the high byte of the control word... 5483 unsigned OldCW = 5484 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5485 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5486 5487 // Set the high part to be round to zero... 5488 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5489 .addImm(0xC7F); 5490 5491 // Reload the modified control word now... 5492 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5493 5494 // Restore the memory image of control word to original value 5495 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5496 .addReg(OldCW); 5497 5498 // Get the X86 opcode to use. 5499 unsigned Opc; 5500 switch (MI->getOpcode()) { 5501 default: assert(0 && "illegal opcode!"); 5502 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5503 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5504 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5505 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5506 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5507 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5508 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5509 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5510 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5511 } 5512 5513 X86AddressMode AM; 5514 MachineOperand &Op = MI->getOperand(0); 5515 if (Op.isRegister()) { 5516 AM.BaseType = X86AddressMode::RegBase; 5517 AM.Base.Reg = Op.getReg(); 5518 } else { 5519 AM.BaseType = X86AddressMode::FrameIndexBase; 5520 AM.Base.FrameIndex = Op.getIndex(); 5521 } 5522 Op = MI->getOperand(1); 5523 if (Op.isImmediate()) 5524 AM.Scale = Op.getImm(); 5525 Op = MI->getOperand(2); 5526 if (Op.isImmediate()) 5527 AM.IndexReg = Op.getImm(); 5528 Op = MI->getOperand(3); 5529 if (Op.isGlobalAddress()) { 5530 AM.GV = Op.getGlobal(); 5531 } else { 5532 AM.Disp = Op.getImm(); 5533 } 5534 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5535 .addReg(MI->getOperand(4).getReg()); 5536 5537 // Reload the original control word now. 5538 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5539 5540 delete MI; // The pseudo instruction is gone now. 5541 return BB; 5542 } 5543 } 5544} 5545 5546//===----------------------------------------------------------------------===// 5547// X86 Optimization Hooks 5548//===----------------------------------------------------------------------===// 5549 5550void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5551 uint64_t Mask, 5552 uint64_t &KnownZero, 5553 uint64_t &KnownOne, 5554 const SelectionDAG &DAG, 5555 unsigned Depth) const { 5556 unsigned Opc = Op.getOpcode(); 5557 assert((Opc >= ISD::BUILTIN_OP_END || 5558 Opc == ISD::INTRINSIC_WO_CHAIN || 5559 Opc == ISD::INTRINSIC_W_CHAIN || 5560 Opc == ISD::INTRINSIC_VOID) && 5561 "Should use MaskedValueIsZero if you don't know whether Op" 5562 " is a target node!"); 5563 5564 KnownZero = KnownOne = 0; // Don't know anything. 
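  // The only target node with known bits recorded here is X86ISD::SETCC,
  // which materializes 0 or 1 in an i8, so every bit above bit 0 is known
  // zero.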
5565 switch (Opc) { 5566 default: break; 5567 case X86ISD::SETCC: 5568 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5569 break; 5570 } 5571} 5572 5573/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5574/// element of the result of the vector shuffle. 5575static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5576 MVT::ValueType VT = N->getValueType(0); 5577 SDOperand PermMask = N->getOperand(2); 5578 unsigned NumElems = PermMask.getNumOperands(); 5579 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5580 i %= NumElems; 5581 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5582 return (i == 0) 5583 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5584 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5585 SDOperand Idx = PermMask.getOperand(i); 5586 if (Idx.getOpcode() == ISD::UNDEF) 5587 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5588 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5589 } 5590 return SDOperand(); 5591} 5592 5593/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5594/// node is a GlobalAddress + an offset. 5595static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5596 unsigned Opc = N->getOpcode(); 5597 if (Opc == X86ISD::Wrapper) { 5598 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5599 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5600 return true; 5601 } 5602 } else if (Opc == ISD::ADD) { 5603 SDOperand N1 = N->getOperand(0); 5604 SDOperand N2 = N->getOperand(1); 5605 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5606 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5607 if (V) { 5608 Offset += V->getSignExtended(); 5609 return true; 5610 } 5611 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5612 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5613 if (V) { 5614 Offset += V->getSignExtended(); 5615 return true; 5616 } 5617 } 5618 } 5619 return false; 5620} 5621 5622/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5623/// + Dist * Size. 5624static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5625 MachineFrameInfo *MFI) { 5626 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5627 return false; 5628 5629 SDOperand Loc = N->getOperand(1); 5630 SDOperand BaseLoc = Base->getOperand(1); 5631 if (Loc.getOpcode() == ISD::FrameIndex) { 5632 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5633 return false; 5634 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5635 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5636 int FS = MFI->getObjectSize(FI); 5637 int BFS = MFI->getObjectSize(BFI); 5638 if (FS != BFS || FS != Size) return false; 5639 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5640 } else { 5641 GlobalValue *GV1 = NULL; 5642 GlobalValue *GV2 = NULL; 5643 int64_t Offset1 = 0; 5644 int64_t Offset2 = 0; 5645 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5646 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5647 if (isGA1 && isGA2 && GV1 == GV2) 5648 return Offset1 == (Offset2 + Dist*Size); 5649 } 5650 5651 return false; 5652} 5653 5654static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5655 const X86Subtarget *Subtarget) { 5656 GlobalValue *GV; 5657 int64_t Offset = 0; 5658 if (isGAPlusOffset(Base, GV, Offset)) 5659 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5660 // DAG combine handles the stack object case. 
5661 return false; 5662} 5663 5664 5665/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5666/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5667/// if the load addresses are consecutive, non-overlapping, and in the right 5668/// order. 5669static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5670 const X86Subtarget *Subtarget) { 5671 MachineFunction &MF = DAG.getMachineFunction(); 5672 MachineFrameInfo *MFI = MF.getFrameInfo(); 5673 MVT::ValueType VT = N->getValueType(0); 5674 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5675 SDOperand PermMask = N->getOperand(2); 5676 int NumElems = (int)PermMask.getNumOperands(); 5677 SDNode *Base = NULL; 5678 for (int i = 0; i < NumElems; ++i) { 5679 SDOperand Idx = PermMask.getOperand(i); 5680 if (Idx.getOpcode() == ISD::UNDEF) { 5681 if (!Base) return SDOperand(); 5682 } else { 5683 SDOperand Arg = 5684 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5685 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5686 return SDOperand(); 5687 if (!Base) 5688 Base = Arg.Val; 5689 else if (!isConsecutiveLoad(Arg.Val, Base, 5690 i, MVT::getSizeInBits(EVT)/8,MFI)) 5691 return SDOperand(); 5692 } 5693 } 5694 5695 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5696 LoadSDNode *LD = cast<LoadSDNode>(Base); 5697 if (isAlign16) { 5698 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5699 LD->getSrcValueOffset(), LD->isVolatile()); 5700 } else { 5701 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5702 LD->getSrcValueOffset(), LD->isVolatile(), 5703 LD->getAlignment()); 5704 } 5705} 5706 5707/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5708static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5709 const X86Subtarget *Subtarget) { 5710 SDOperand Cond = N->getOperand(0); 5711 5712 // If we have SSE[12] support, try to form min/max nodes. 5713 if (Subtarget->hasSSE2() && 5714 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5715 if (Cond.getOpcode() == ISD::SETCC) { 5716 // Get the LHS/RHS of the select. 5717 SDOperand LHS = N->getOperand(1); 5718 SDOperand RHS = N->getOperand(2); 5719 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5720 5721 unsigned Opcode = 0; 5722 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5723 switch (CC) { 5724 default: break; 5725 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5726 case ISD::SETULE: 5727 case ISD::SETLE: 5728 if (!UnsafeFPMath) break; 5729 // FALL THROUGH. 5730 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5731 case ISD::SETLT: 5732 Opcode = X86ISD::FMIN; 5733 break; 5734 5735 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5736 case ISD::SETUGT: 5737 case ISD::SETGT: 5738 if (!UnsafeFPMath) break; 5739 // FALL THROUGH. 5740 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5741 case ISD::SETGE: 5742 Opcode = X86ISD::FMAX; 5743 break; 5744 } 5745 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5746 switch (CC) { 5747 default: break; 5748 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5749 case ISD::SETUGT: 5750 case ISD::SETGT: 5751 if (!UnsafeFPMath) break; 5752 // FALL THROUGH. 5753 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5754 case ISD::SETGE: 5755 Opcode = X86ISD::FMIN; 5756 break; 5757 5758 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5759 case ISD::SETULE: 5760 case ISD::SETLE: 5761 if (!UnsafeFPMath) break; 5762 // FALL THROUGH. 
5763 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 5764 case ISD::SETLT: 5765 Opcode = X86ISD::FMAX; 5766 break; 5767 } 5768 } 5769 5770 if (Opcode) 5771 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5772 } 5773 5774 } 5775 5776 return SDOperand(); 5777} 5778 5779/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 5780/// X86ISD::FXOR nodes. 5781static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 5782 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 5783 // F[X]OR(0.0, x) -> x 5784 // F[X]OR(x, 0.0) -> x 5785 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5786 if (C->getValueAPF().isPosZero()) 5787 return N->getOperand(1); 5788 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5789 if (C->getValueAPF().isPosZero()) 5790 return N->getOperand(0); 5791 return SDOperand(); 5792} 5793 5794/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 5795static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 5796 // FAND(0.0, x) -> 0.0 5797 // FAND(x, 0.0) -> 0.0 5798 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5799 if (C->getValueAPF().isPosZero()) 5800 return N->getOperand(0); 5801 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5802 if (C->getValueAPF().isPosZero()) 5803 return N->getOperand(1); 5804 return SDOperand(); 5805} 5806 5807 5808SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5809 DAGCombinerInfo &DCI) const { 5810 SelectionDAG &DAG = DCI.DAG; 5811 switch (N->getOpcode()) { 5812 default: break; 5813 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget); 5814 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 5815 case X86ISD::FXOR: 5816 case X86ISD::FOR: return PerformFORCombine(N, DAG); 5817 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 5818 } 5819 5820 return SDOperand(); 5821} 5822 5823//===----------------------------------------------------------------------===// 5824// X86 Inline Assembly Support 5825//===----------------------------------------------------------------------===// 5826 5827/// getConstraintType - Given a constraint letter, return the type of 5828/// constraint it is for this target. 5829X86TargetLowering::ConstraintType 5830X86TargetLowering::getConstraintType(const std::string &Constraint) const { 5831 if (Constraint.size() == 1) { 5832 switch (Constraint[0]) { 5833 case 'A': 5834 case 'r': 5835 case 'R': 5836 case 'l': 5837 case 'q': 5838 case 'Q': 5839 case 'x': 5840 case 'Y': 5841 return C_RegisterClass; 5842 default: 5843 break; 5844 } 5845 } 5846 return TargetLowering::getConstraintType(Constraint); 5847} 5848 5849/// LowerXConstraint - try to replace an X constraint, which matches anything, 5850/// with another that has more specific requirements based on the type of the 5851/// corresponding operand. 5852void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT, 5853 std::string& s) const { 5854 if (MVT::isFloatingPoint(ConstraintVT)) { 5855 if (Subtarget->hasSSE2()) 5856 s = "Y"; 5857 else if (Subtarget->hasSSE1()) 5858 s = "x"; 5859 else 5860 s = "f"; 5861 } else 5862 return TargetLowering::lowerXConstraint(ConstraintVT, s); 5863} 5864 5865/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5866/// vector. If it is invalid, don't add anything to Ops. 
5867 void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
5868                                                      char Constraint,
5869                                                      std::vector<SDOperand>&Ops,
5870                                                      SelectionDAG &DAG) {
5871   SDOperand Result(0, 0);
5872 
5873   switch (Constraint) {
5874   default: break;
5875   case 'I':
5876     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
5877       if (C->getValue() <= 31) {
5878         Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5879         break;
5880       }
5881     }
5882     return;
5883   case 'N':
5884     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
5885       if (C->getValue() <= 255) {
5886         Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5887         break;
5888       }
5889     }
5890     return;
5891   case 'i': {
5892     // Literal immediates are always ok.
5893     if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
5894       Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
5895       break;
5896     }
5897 
5898     // If we are in non-pic codegen mode, we allow the address of a global
5899     // (with an optional displacement) to be used with 'i'.
5900     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
5901     int64_t Offset = 0;
5902 
5903     // Match either (GA), (GA+C) or (C+GA).
5904     if (GA) {
5905       Offset = GA->getOffset();
5906     } else if (Op.getOpcode() == ISD::ADD) {
5907       ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5908       GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
5909       if (C && GA) {
5910         Offset = GA->getOffset()+C->getValue();
5911       } else {
5912         C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
5913         GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
5914         if (C && GA)
5915           Offset = GA->getOffset()+C->getValue();
5916         else
5917           C = 0, GA = 0;
5918       }
5919     }
5920 
5921     if (GA) {
5922       // If addressing this global requires a load (e.g. in PIC mode), we
5923       // can't match.
5924       if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
5925                                          false))
5926         return;
5927 
5928       Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
5929                                       Offset);
5930       Result = Op;
5931       break;
5932     }
5933 
5934     // Otherwise, not valid for this mode.
5935     return;
5936   }
5937   }
5938 
5939   if (Result.Val) {
5940     Ops.push_back(Result);
5941     return;
5942   }
5943   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
5944 }
5945 
5946 std::vector<unsigned> X86TargetLowering::
5947 getRegClassForInlineAsmConstraint(const std::string &Constraint,
5948                                   MVT::ValueType VT) const {
5949   if (Constraint.size() == 1) {
5950     // FIXME: not handling fp-stack yet!
5951 switch (Constraint[0]) { // GCC X86 Constraint Letters 5952 default: break; // Unknown constraint letter 5953 case 'A': // EAX/EDX 5954 if (VT == MVT::i32 || VT == MVT::i64) 5955 return make_vector<unsigned>(X86::EAX, X86::EDX, 0); 5956 break; 5957 case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode) 5958 case 'Q': // Q_REGS 5959 if (VT == MVT::i32) 5960 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0); 5961 else if (VT == MVT::i16) 5962 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0); 5963 else if (VT == MVT::i8) 5964 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0); 5965 else if (VT == MVT::i64) 5966 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0); 5967 break; 5968 } 5969 } 5970 5971 return std::vector<unsigned>(); 5972} 5973 5974std::pair<unsigned, const TargetRegisterClass*> 5975X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5976 MVT::ValueType VT) const { 5977 // First, see if this is a constraint that directly corresponds to an LLVM 5978 // register class. 5979 if (Constraint.size() == 1) { 5980 // GCC Constraint Letters 5981 switch (Constraint[0]) { 5982 default: break; 5983 case 'r': // GENERAL_REGS 5984 case 'R': // LEGACY_REGS 5985 case 'l': // INDEX_REGS 5986 if (VT == MVT::i64 && Subtarget->is64Bit()) 5987 return std::make_pair(0U, X86::GR64RegisterClass); 5988 if (VT == MVT::i32) 5989 return std::make_pair(0U, X86::GR32RegisterClass); 5990 else if (VT == MVT::i16) 5991 return std::make_pair(0U, X86::GR16RegisterClass); 5992 else if (VT == MVT::i8) 5993 return std::make_pair(0U, X86::GR8RegisterClass); 5994 break; 5995 case 'y': // MMX_REGS if MMX allowed. 5996 if (!Subtarget->hasMMX()) break; 5997 return std::make_pair(0U, X86::VR64RegisterClass); 5998 break; 5999 case 'Y': // SSE_REGS if SSE2 allowed 6000 if (!Subtarget->hasSSE2()) break; 6001 // FALL THROUGH. 6002 case 'x': // SSE_REGS if SSE1 allowed 6003 if (!Subtarget->hasSSE1()) break; 6004 6005 switch (VT) { 6006 default: break; 6007 // Scalar SSE types. 6008 case MVT::f32: 6009 case MVT::i32: 6010 return std::make_pair(0U, X86::FR32RegisterClass); 6011 case MVT::f64: 6012 case MVT::i64: 6013 return std::make_pair(0U, X86::FR64RegisterClass); 6014 // Vector types. 6015 case MVT::v16i8: 6016 case MVT::v8i16: 6017 case MVT::v4i32: 6018 case MVT::v2i64: 6019 case MVT::v4f32: 6020 case MVT::v2f64: 6021 return std::make_pair(0U, X86::VR128RegisterClass); 6022 } 6023 break; 6024 } 6025 } 6026 6027 // Use the default implementation in TargetLowering to convert the register 6028 // constraint into a member of a register class. 6029 std::pair<unsigned, const TargetRegisterClass*> Res; 6030 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6031 6032 // Not found as a standard register? 6033 if (Res.second == 0) { 6034 // GCC calls "st(0)" just plain "st". 6035 if (StringsEqualNoCase("{st}", Constraint)) { 6036 Res.first = X86::ST0; 6037 Res.second = X86::RFP80RegisterClass; 6038 } 6039 6040 return Res; 6041 } 6042 6043 // Otherwise, check to see if this is a register class of the wrong value 6044 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 6045 // turn into {ax},{dx}. 6046 if (Res.second->hasType(VT)) 6047 return Res; // Correct type already, nothing to do. 6048 6049 // All of the single-register GCC register classes map their values onto 6050 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". 
If we
6051 // really want an 8-bit, 32-bit or 64-bit register, map to the appropriate
6052 // register class and return the appropriate register.
6053   if (Res.second != X86::GR16RegisterClass)
6054     return Res;
6055 
6056   if (VT == MVT::i8) {
6057     unsigned DestReg = 0;
6058     switch (Res.first) {
6059     default: break;
6060     case X86::AX: DestReg = X86::AL; break;
6061     case X86::DX: DestReg = X86::DL; break;
6062     case X86::CX: DestReg = X86::CL; break;
6063     case X86::BX: DestReg = X86::BL; break;
6064     }
6065     if (DestReg) {
6066       Res.first = DestReg;
6067       Res.second = X86::GR8RegisterClass;
6068     }
6069   } else if (VT == MVT::i32) {
6070     unsigned DestReg = 0;
6071     switch (Res.first) {
6072     default: break;
6073     case X86::AX: DestReg = X86::EAX; break;
6074     case X86::DX: DestReg = X86::EDX; break;
6075     case X86::CX: DestReg = X86::ECX; break;
6076     case X86::BX: DestReg = X86::EBX; break;
6077     case X86::SI: DestReg = X86::ESI; break;
6078     case X86::DI: DestReg = X86::EDI; break;
6079     case X86::BP: DestReg = X86::EBP; break;
6080     case X86::SP: DestReg = X86::ESP; break;
6081     }
6082     if (DestReg) {
6083       Res.first = DestReg;
6084       Res.second = X86::GR32RegisterClass;
6085     }
6086   } else if (VT == MVT::i64) {
6087     unsigned DestReg = 0;
6088     switch (Res.first) {
6089     default: break;
6090     case X86::AX: DestReg = X86::RAX; break;
6091     case X86::DX: DestReg = X86::RDX; break;
6092     case X86::CX: DestReg = X86::RCX; break;
6093     case X86::BX: DestReg = X86::RBX; break;
6094     case X86::SI: DestReg = X86::RSI; break;
6095     case X86::DI: DestReg = X86::RDI; break;
6096     case X86::BP: DestReg = X86::RBP; break;
6097     case X86::SP: DestReg = X86::RSP; break;
6098     }
6099     if (DestReg) {
6100       Res.first = DestReg;
6101       Res.second = X86::GR64RegisterClass;
6102     }
6103   }
6104 
6105   return Res;
6106 }
6107
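// Illustrative examples of the remapping above (not exhaustive): the
// constraint "{ax}" with an i32 operand resolves to EAX in GR32, and with an
// i64 operand to RAX in GR64; "{st}" resolves to ST0 in the RFP80 class.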