X86ISelLowering.cpp revision f3597a13ae66dbe682029f64fb1a7f2b65ff6536
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

// FIXME: temporary.
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (!Subtarget->isTargetDarwin())
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmpLongJmp(true);

  // Add legal addressing mode scale values.
  addLegalAddressScale(8);
  addLegalAddressScale(4);
  addLegalAddressScale(2);
  // Enter the ones which require both scale + index last. These are more
  // expensive.
  addLegalAddressScale(9);
  addLegalAddressScale(5);
  addLegalAddressScale(3);

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  if (!Subtarget->is64Bit()) {
    // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
    setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSE) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f64  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i64  , Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Expand);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY           , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE        , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE     , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

//===----------------------------------------------------------------------===//
//                  C Calling Convention implementation
//===----------------------------------------------------------------------===//

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

/// HowToPassCCCArgument - Returns how a formal argument of the specified type
/// should be passed. If it is passed on the stack, returns the size of the
/// stack slot; if it is passed in an XMM register, returns the number of XMM
/// registers needed.
static void
HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs,
                     unsigned &ObjSize, unsigned &ObjXMMRegs) {
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:  ObjSize = 1; break;
  case MVT::i16: ObjSize = 2; break;
  case MVT::i32: ObjSize = 4; break;
  case MVT::i64: ObjSize = 8; break;
  case MVT::f32: ObjSize = 4; break;
  case MVT::f64: ObjSize = 8; break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 4)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };
  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjXMMRegs = 0;
    HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs);
    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    SDOperand ArgValue;
    if (ObjXMMRegs) {
      // Passed in an XMM register.
      unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                               X86::VR128RegisterClass);
      ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
      ArgValues.push_back(ArgValue);
      NumXMMRegs += ObjXMMRegs;
    } else {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
      ArgValues.push_back(ArgValue);
      ArgOffset += ArgIncrement;   // Move on to the next argument...
    }
  }

  ArgValues.push_back(Root);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = 0;          // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // If this is a struct return, the callee pops the hidden struct pointer.
  // This is common for Darwin/X86, Linux & Mingw32 targets.
  if (MF.getFunction()->getCallingConv() == CallingConv::CSRet)
    BytesToPopOnReturn = 4;

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
                                     Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0], ArgValues.size());
}


SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain      = Op.getOperand(0);
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isTailCall      = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee     = Op.getOperand(4);
  MVT::ValueType RetVT = Op.Val->getValueType(0);
  unsigned NumOps      = (Op.getNumOperands() - 5) / 2;

  // Keep track of the number of XMM regs passed so far.
  unsigned NumXMMRegs = 0;
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
    case MVT::f64:
      NumBytes += 8;
      break;
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 4)
        ++NumXMMRegs;
      else {
        // XMM arguments have to be aligned on 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumXMMRegs = 0;
  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  std::vector<SDOperand> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16: {
      // Promote the integer to 32 bits. If the input type is signed, use a
      // sign extend; otherwise use a zero extend.
      unsigned ExtOp =
        cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ?
          ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
    }
    // Fallthrough

    case MVT::i32:
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      ArgOffset += 4;
      break;
    }
    case MVT::i64:
    case MVT::f64: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      ArgOffset += 8;
      break;
    }
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 4) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        // XMM arguments have to be aligned on 16-byte boundary.
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        ArgOffset += 16;
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  // If this is a call to a struct-return function, the callee
  // pops the hidden struct pointer, so we have to push it back.
  // This is common for Darwin/X86, Linux & Mingw32 targets.
  if (CallingConv == CallingConv::CSRet)
    NumBytesForCalleeToPush = 4;

  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  if (RetVT != MVT::Other)
    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  if (RetVT != MVT::Other)
    InFlag = Chain.getValue(1);

  std::vector<SDOperand> ResultVals;
  NodeTys.clear();
  switch (RetVT) {
  default: assert(0 && "Unknown value type to return!");
  case MVT::Other: break;
  case MVT::i8:
    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i8);
    break;
  case MVT::i16:
    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i16);
    break;
  case MVT::i32:
    if (Op.Val->getValueType(1) == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      NodeTys.push_back(MVT::i32);
    } else {
      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
    }
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(RetVT);
    break;
  case MVT::f32:
  case MVT::f64: {
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(InFlag);
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
                                   &Ops[0], Ops.size());
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);
    if (X86ScalarSSE) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      Tys.clear();
      Tys.push_back(MVT::Other);
      Ops.clear();
      Ops.push_back(Chain);
      Ops.push_back(RetVal);
      Ops.push_back(StackSlot);
      Ops.push_back(DAG.getValueType(RetVT));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
      RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }

    if (RetVT == MVT::f32 && !X86ScalarSSE)
      // FIXME: we would really like to remember that this FP_ROUND
      // operation is okay to eliminate if we allow excess FP precision.
      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
    ResultVals.push_back(RetVal);
    NodeTys.push_back(RetVT);
    break;
  }
  }

  // If the function returns void, just return the chain.
  if (ResultVals.empty())
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  NodeTys.push_back(MVT::Other);
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              &ResultVals[0], ResultVals.size());
  return Res.getValue(Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

/// HowToPassX86_64CCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is passed on the stack, returns the size of
/// the stack slot; if it is passed in an integer or XMM register, returns the
/// number of integer or XMM registers needed.
static void
HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
                           unsigned NumIntRegs, unsigned NumXMMRegs,
                           unsigned &ObjSize, unsigned &ObjIntRegs,
                           unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    if (NumIntRegs < 6)
      ObjIntRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::i8:  ObjSize = 1; break;
      case MVT::i16: ObjSize = 2; break;
      case MVT::i32: ObjSize = 4; break;
      case MVT::i64: ObjSize = 8; break;
      }
    }
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 8)
      ObjXMMRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::f32:  ObjSize = 4; break;
      case MVT::f64:  ObjSize = 8; break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: ObjSize = 16; break;
      }
      break;
    }
  }
}

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [RSP] -- return address
  // [RSP + 8] -- first nonreg argument (leftmost lexically)
  // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
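
  // Argument registers defined by the X86-64 (System V AMD64) C calling
  // convention: the first six integer arguments are passed in RDI, RSI, RDX,
  // RCX, R8 and R9 (or the matching narrower subregisters below), and the
  // first eight FP/vector arguments are passed in XMM0-XMM7.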
  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 8;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    // FIXME: __int128 and long double support?
    HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
                               ObjSize, ObjIntRegs, ObjXMMRegs);
    if (ObjSize > 8)
      ArgIncrement = ObjSize;

    unsigned Reg = 0;
    SDOperand ArgValue;
    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::i64: {
        TargetRegisterClass *RC = NULL;
        switch (ObjectVT) {
        default: break;
        case MVT::i8:
          RC = X86::GR8RegisterClass;
          Reg = GPR8ArgRegs[NumIntRegs];
          break;
        case MVT::i16:
          RC = X86::GR16RegisterClass;
          Reg = GPR16ArgRegs[NumIntRegs];
          break;
        case MVT::i32:
          RC = X86::GR32RegisterClass;
          Reg = GPR32ArgRegs[NumIntRegs];
          break;
        case MVT::i64:
          RC = X86::GR64RegisterClass;
          Reg = GPR64ArgRegs[NumIntRegs];
          break;
        }
        Reg = AddLiveIn(MF, Reg, RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      case MVT::f32:
      case MVT::f64:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: {
        TargetRegisterClass *RC = (ObjectVT == MVT::f32) ?
          X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
          X86::FR64RegisterClass : X86::VR128RegisterClass);
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    } else if (ObjSize) {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
      ArgOffset += ArgIncrement;   // Move on to the next argument.
    }

    ArgValues.push_back(ArgValue);
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
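    // The register save area layout is fixed by the ABI: 6 integer registers
    // * 8 bytes followed by 8 XMM registers * 16 bytes, 176 bytes in all.
    // VarArgsGPOffset and VarArgsFPOffset computed below are the va_list
    // gp_offset/fp_offset values that index into this area.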
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

    // Store the integer parameter registers.
    std::vector<SDOperand> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset, getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  ReturnAddrIndex = 0;      // No return address slot generated yet.
  BytesToPopOnReturn = 0;   // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
                                     Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0], ArgValues.size());
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain      = Op.getOperand(0);
  bool isVarArg        = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall      = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee     = Op.getOperand(4);
  MVT::ValueType RetVT = Op.Val->getValueType(0);
  unsigned NumOps      = (Op.getNumOperands() - 5) / 2;

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
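
  // The same X86-64 argument registers as on the formal-argument side. The
  // first pass over the operands below only counts how many bytes go on the
  // stack, so that CALLSEQ_START can be emitted before any argument stores.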
  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unknown value type!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6)
        ++NumIntRegs;
      else
        NumBytes += 8;
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8)
        NumXMMRegs++;
      else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
        NumBytes += 8;
      else {
        // XMM arguments have to be aligned on 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumIntRegs = 0;
  NumXMMRegs = 0;
  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  std::vector<SDOperand> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6) {
        unsigned Reg = 0;
        switch (ArgVT) {
        default: break;
        case MVT::i8:  Reg = GPR8ArgRegs[NumIntRegs];  break;
        case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
        case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
        case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
        }
        RegsToPass.push_back(std::make_pair(Reg, Arg));
        ++NumIntRegs;
      } else {
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        ArgOffset += 8;
      }
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
          // XMM arguments have to be aligned on 16-byte boundary.
          ArgOffset = ((ArgOffset + 15) / 16) * 16;
        }
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
          ArgOffset += 8;
        else
          ArgOffset += 16;
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    // From the AMD64 ABI document:
    //   For calls that may call functions that use varargs or stdargs
    //   (prototype-less calls or calls to functions containing ellipsis (...)
    //   in the declaration) %al is used as a hidden argument to specify the
    //   number of SSE registers used. The contents of %al do not need to
    //   match exactly the number of registers, but must be an upper bound on
    //   the number of SSE registers used and is in the range 0 - 8 inclusive.
    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  if (RetVT != MVT::Other)
    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
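  // Build the CALLSEQ_END node. Its operands are the chain, the number of
  // bytes we pushed for this call, and the number of bytes the callee pops on
  // return (always zero here: the X86-64 C convention leaves stack cleanup to
  // the caller).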
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  if (RetVT != MVT::Other)
    InFlag = Chain.getValue(1);

  std::vector<SDOperand> ResultVals;
  NodeTys.clear();
  switch (RetVT) {
  default: assert(0 && "Unknown value type to return!");
  case MVT::Other: break;
  case MVT::i8:
    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i8);
    break;
  case MVT::i16:
    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i16);
    break;
  case MVT::i32:
    Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::i64:
    if (Op.Val->getValueType(1) == MVT::i64) {
      // FIXME: __int128 support?
      Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64,
                                 Chain.getValue(2)).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      NodeTys.push_back(MVT::i64);
    } else {
      Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
    }
    NodeTys.push_back(MVT::i64);
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    // FIXME: long double support?
    Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(RetVT);
    break;
  }

  // If the function returns void, just return the chain.
  if (ResultVals.empty())
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  NodeTys.push_back(MVT::Other);
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              &ResultVals[0], ResultVals.size());
  return Res.getValue(Op.ResNo);
}

//===----------------------------------------------------------------------===//
//                 Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
//

/// HowToPassFastCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is passed on the stack, returns the size of
/// the stack slot; if it is passed in an integer or XMM register, returns the
/// number of integer or XMM registers needed.
static void
HowToPassFastCCArgument(MVT::ValueType ObjectVT,
                        unsigned NumIntRegs, unsigned NumXMMRegs,
                        unsigned &ObjSize, unsigned &ObjIntRegs,
                        unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 1;
    break;
  case MVT::i16:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 2;
    break;
  case MVT::i32:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 4;
    break;
  case MVT::i64:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
      ObjIntRegs = 2;
    } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
#endif
      ObjSize = 8;
    break;
  case MVT::f32:
    ObjSize = 4;
    break;
  case MVT::f64:
    ObjSize = 8;
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 4)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}

SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues()-1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
  //    ...
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX
  // are both used).
  unsigned NumIntRegs = 0;
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
                            ObjSize, ObjIntRegs, ObjXMMRegs);
    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    unsigned Reg = 0;
    SDOperand ArgValue;
    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
        Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                        X86::GR8RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
        break;
      case MVT::i16:
        Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                        X86::GR16RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
        break;
      case MVT::i32:
        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                        X86::GR32RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
        break;
      case MVT::i64:
        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                        X86::GR32RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
        if (ObjIntRegs == 2) {
          Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
          SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
        }
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    }

    if (ObjSize) {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      if (ObjectVT == MVT::i64 && ObjIntRegs) {
        SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
                                          NULL, 0);
        ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
      } else
        ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
      ArgOffset += ArgIncrement;   // Move on to the next argument.
    }

    ArgValues.push_back(ArgValue);
  }

  ArgValues.push_back(Root);

  // Make sure the callee pop value is 8n+4 bytes; this ensures the start of
  // the arguments is aligned after the return address has been pushed.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(MF.getFunction()->getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    MF.addLiveOut(X86::XMM0);
    break;
  }

  // Return the new list of results.
1532 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 1533 Op.Val->value_end()); 1534 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 1535} 1536 1537SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, 1538 bool isFastCall) { 1539 SDOperand Chain = Op.getOperand(0); 1540 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1541 SDOperand Callee = Op.getOperand(4); 1542 MVT::ValueType RetVT= Op.Val->getValueType(0); 1543 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1544 1545 // Count how many bytes are to be pushed on the stack. 1546 unsigned NumBytes = 0; 1547 1548 // Keep track of the number of integer regs passed so far. This can be either 1549 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both 1550 // used). 1551 unsigned NumIntRegs = 0; 1552 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 1553 1554 static const unsigned GPRArgRegs[][2] = { 1555 { X86::AL, X86::DL }, 1556 { X86::AX, X86::DX }, 1557 { X86::EAX, X86::EDX } 1558 }; 1559#if 0 1560 static const unsigned FastCallGPRArgRegs[][2] = { 1561 { X86::CL, X86::DL }, 1562 { X86::CX, X86::DX }, 1563 { X86::ECX, X86::EDX } 1564 }; 1565#endif 1566 static const unsigned XMMArgRegs[] = { 1567 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 1568 }; 1569 1570 for (unsigned i = 0; i != NumOps; ++i) { 1571 SDOperand Arg = Op.getOperand(5+2*i); 1572 1573 switch (Arg.getValueType()) { 1574 default: assert(0 && "Unknown value type!"); 1575 case MVT::i8: 1576 case MVT::i16: 1577 case MVT::i32: { 1578 unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS); 1579 if (NumIntRegs < MaxNumIntRegs) { 1580 ++NumIntRegs; 1581 break; 1582 } 1583 } // Fall through 1584 case MVT::f32: 1585 NumBytes += 4; 1586 break; 1587 case MVT::f64: 1588 NumBytes += 8; 1589 break; 1590 case MVT::v16i8: 1591 case MVT::v8i16: 1592 case MVT::v4i32: 1593 case MVT::v2i64: 1594 case MVT::v4f32: 1595 case MVT::v2f64: 1596 if (isFastCall) { 1597 assert(0 && "Unknown value type!"); 1598 } else { 1599 if (NumXMMRegs < 4) 1600 NumXMMRegs++; 1601 else { 1602 // XMM arguments have to be aligned on 16-byte boundary. 1603 NumBytes = ((NumBytes + 15) / 16) * 16; 1604 NumBytes += 16; 1605 } 1606 } 1607 break; 1608 } 1609 } 1610 1611 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1612 // arguments and the arguments after the retaddr has been pushed are aligned. 1613 if ((NumBytes & 7) == 0) 1614 NumBytes += 4; 1615 1616 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1617 1618 // Arguments go on the stack in reverse order, as specified by the ABI. 1619 unsigned ArgOffset = 0; 1620 NumIntRegs = 0; 1621 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1622 std::vector<SDOperand> MemOpChains; 1623 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 1624 for (unsigned i = 0; i != NumOps; ++i) { 1625 SDOperand Arg = Op.getOperand(5+2*i); 1626 1627 switch (Arg.getValueType()) { 1628 default: assert(0 && "Unexpected ValueType for argument!"); 1629 case MVT::i8: 1630 case MVT::i16: 1631 case MVT::i32: { 1632 unsigned MaxNumIntRegs = (isFastCall ? 
2 : FASTCC_NUM_INT_ARGS_INREGS); 1633 if (NumIntRegs < MaxNumIntRegs) { 1634 RegsToPass.push_back( 1635 std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs], 1636 Arg)); 1637 ++NumIntRegs; 1638 break; 1639 } 1640 } // Fall through 1641 case MVT::f32: { 1642 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1643 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1644 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1645 ArgOffset += 4; 1646 break; 1647 } 1648 case MVT::f64: { 1649 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1650 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1651 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1652 ArgOffset += 8; 1653 break; 1654 } 1655 case MVT::v16i8: 1656 case MVT::v8i16: 1657 case MVT::v4i32: 1658 case MVT::v2i64: 1659 case MVT::v4f32: 1660 case MVT::v2f64: 1661 if (isFastCall) { 1662 assert(0 && "Unexpected ValueType for argument!"); 1663 } else { 1664 if (NumXMMRegs < 4) { 1665 RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg)); 1666 NumXMMRegs++; 1667 } else { 1668 // XMM arguments have to be aligned on 16-byte boundary. 1669 ArgOffset = ((ArgOffset + 15) / 16) * 16; 1670 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1671 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1672 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1673 ArgOffset += 16; 1674 } 1675 } 1676 break; 1677 } 1678 } 1679 1680 if (!MemOpChains.empty()) 1681 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1682 &MemOpChains[0], MemOpChains.size()); 1683 1684 // Build a sequence of copy-to-reg nodes chained together with token chain 1685 // and flag operands which copy the outgoing args into registers. 1686 SDOperand InFlag; 1687 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1688 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1689 InFlag); 1690 InFlag = Chain.getValue(1); 1691 } 1692 1693 // If the callee is a GlobalAddress node (quite common, every direct call is) 1694 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1695 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1696 // We should use extra load for direct calls to dllimported functions 1697 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), true)) 1698 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1699 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1700 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1701 1702 std::vector<MVT::ValueType> NodeTys; 1703 NodeTys.push_back(MVT::Other); // Returns a chain 1704 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1705 std::vector<SDOperand> Ops; 1706 Ops.push_back(Chain); 1707 Ops.push_back(Callee); 1708 1709 // Add argument registers to the end of the list so that they are known live 1710 // into the call. 1711 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1712 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1713 RegsToPass[i].second.getValueType())); 1714 1715 if (InFlag.Val) 1716 Ops.push_back(InFlag); 1717 1718 // FIXME: Do not generate X86ISD::TAILCALL for now. 1719 Chain = DAG.getNode(isTailCall ? 
X86ISD::TAILCALL : X86ISD::CALL, 1720 NodeTys, &Ops[0], Ops.size()); 1721 InFlag = Chain.getValue(1); 1722 1723 NodeTys.clear(); 1724 NodeTys.push_back(MVT::Other); // Returns a chain 1725 if (RetVT != MVT::Other) 1726 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1727 Ops.clear(); 1728 Ops.push_back(Chain); 1729 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1730 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1731 Ops.push_back(InFlag); 1732 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1733 if (RetVT != MVT::Other) 1734 InFlag = Chain.getValue(1); 1735 1736 std::vector<SDOperand> ResultVals; 1737 NodeTys.clear(); 1738 switch (RetVT) { 1739 default: assert(0 && "Unknown value type to return!"); 1740 case MVT::Other: break; 1741 case MVT::i8: 1742 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 1743 ResultVals.push_back(Chain.getValue(0)); 1744 NodeTys.push_back(MVT::i8); 1745 break; 1746 case MVT::i16: 1747 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 1748 ResultVals.push_back(Chain.getValue(0)); 1749 NodeTys.push_back(MVT::i16); 1750 break; 1751 case MVT::i32: 1752 if (Op.Val->getValueType(1) == MVT::i32) { 1753 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 1754 ResultVals.push_back(Chain.getValue(0)); 1755 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32, 1756 Chain.getValue(2)).getValue(1); 1757 ResultVals.push_back(Chain.getValue(0)); 1758 NodeTys.push_back(MVT::i32); 1759 } else { 1760 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 1761 ResultVals.push_back(Chain.getValue(0)); 1762 } 1763 NodeTys.push_back(MVT::i32); 1764 break; 1765 case MVT::v16i8: 1766 case MVT::v8i16: 1767 case MVT::v4i32: 1768 case MVT::v2i64: 1769 case MVT::v4f32: 1770 case MVT::v2f64: 1771 if (isFastCall) { 1772 assert(0 && "Unknown value type to return!"); 1773 } else { 1774 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1); 1775 ResultVals.push_back(Chain.getValue(0)); 1776 NodeTys.push_back(RetVT); 1777 } 1778 break; 1779 case MVT::f32: 1780 case MVT::f64: { 1781 std::vector<MVT::ValueType> Tys; 1782 Tys.push_back(MVT::f64); 1783 Tys.push_back(MVT::Other); 1784 Tys.push_back(MVT::Flag); 1785 std::vector<SDOperand> Ops; 1786 Ops.push_back(Chain); 1787 Ops.push_back(InFlag); 1788 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, 1789 &Ops[0], Ops.size()); 1790 Chain = RetVal.getValue(1); 1791 InFlag = RetVal.getValue(2); 1792 if (X86ScalarSSE) { 1793 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 1794 // shouldn't be necessary except that RFP cannot be live across 1795 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
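      // The value comes back in ST0; store it to a fresh stack slot with an
      // FST that truncates to RetVT, then reload it so the rest of the
      // function sees the value in an SSE register.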
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      Tys.clear();
      Tys.push_back(MVT::Other);
      Ops.clear();
      Ops.push_back(Chain);
      Ops.push_back(RetVal);
      Ops.push_back(StackSlot);
      Ops.push_back(DAG.getValueType(RetVT));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
      RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }

    if (RetVT == MVT::f32 && !X86ScalarSSE)
      // FIXME: we would really like to remember that this FP_ROUND
      // operation is okay to eliminate if we allow excess FP precision.
      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
    ResultVals.push_back(RetVal);
    NodeTys.push_back(RetVT);
    break;
  }
  }

  // If the function returns void, just return the chain.
  if (ResultVals.empty())
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  NodeTys.push_back(MVT::Other);
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              &ResultVals[0], ResultVals.size());
  return Res.getValue(Op.ResNo);
}

//===----------------------------------------------------------------------===//
// StdCall Calling Convention implementation
//===----------------------------------------------------------------------===//
// StdCall is the standard calling convention for most Windows API routines.
// It differs from the C calling convention just a little: the callee cleans
// up the stack, not the caller. Symbols are also decorated in some fancy
// way :) It doesn't support any vector arguments.

/// HowToPassStdCallCCArgument - Returns how a formal argument of the
/// specified type should be passed: the size of its stack slot.
static void
HowToPassStdCallCCArgument(MVT::ValueType ObjectVT, unsigned &ObjSize) {
  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:  ObjSize = 1; break;
  case MVT::i16: ObjSize = 2; break;
  case MVT::i32: ObjSize = 4; break;
  case MVT::i64: ObjSize = 8; break;
  case MVT::f32: ObjSize = 4; break;
  case MVT::f64: ObjSize = 8; break;
  }
}

SDOperand X86TargetLowering::LowerStdCallCCArguments(SDOperand Op,
                                                     SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    HowToPassStdCallCCArgument(ObjectVT, ObjSize);
    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    SDOperand ArgValue;
    // Create the frame index object for this incoming parameter...
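    // For example, stdcall f(int, double, int) gets offsets 0, 4 and 12
    // here; with the return address at [ESP], those arguments live at
    // [ESP+4], [ESP+8] and [ESP+16] on entry.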
1885 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 1886 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 1887 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 1888 ArgValues.push_back(ArgValue); 1889 ArgOffset += ArgIncrement; // Move on to the next argument... 1890 } 1891 1892 ArgValues.push_back(Root); 1893 1894 // If the function takes variable number of arguments, make a frame index for 1895 // the start of the first vararg value... for expansion of llvm.va_start. 1896 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1897 if (isVarArg) { 1898 BytesToPopOnReturn = 0; // Callee pops nothing. 1899 BytesCallerReserves = ArgOffset; 1900 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); 1901 } else { 1902 BytesToPopOnReturn = ArgOffset; // Callee pops everything.. 1903 BytesCallerReserves = 0; 1904 } 1905 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 1906 ReturnAddrIndex = 0; // No return address slot generated yet. 1907 1908 MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn); 1909 1910 // Return the new list of results. 1911 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 1912 Op.Val->value_end()); 1913 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 1914} 1915 1916 1917SDOperand X86TargetLowering::LowerStdCallCCCallTo(SDOperand Op, 1918 SelectionDAG &DAG) { 1919 SDOperand Chain = Op.getOperand(0); 1920 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1921 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1922 SDOperand Callee = Op.getOperand(4); 1923 MVT::ValueType RetVT= Op.Val->getValueType(0); 1924 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1925 1926 // Count how many bytes are to be pushed on the stack. 1927 unsigned NumBytes = 0; 1928 for (unsigned i = 0; i != NumOps; ++i) { 1929 SDOperand Arg = Op.getOperand(5+2*i); 1930 1931 switch (Arg.getValueType()) { 1932 default: assert(0 && "Unexpected ValueType for argument!"); 1933 case MVT::i8: 1934 case MVT::i16: 1935 case MVT::i32: 1936 case MVT::f32: 1937 NumBytes += 4; 1938 break; 1939 case MVT::i64: 1940 case MVT::f64: 1941 NumBytes += 8; 1942 break; 1943 } 1944 } 1945 1946 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1947 1948 // Arguments go on the stack in reverse order, as specified by the ABI. 1949 unsigned ArgOffset = 0; 1950 std::vector<SDOperand> MemOpChains; 1951 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 1952 for (unsigned i = 0; i != NumOps; ++i) { 1953 SDOperand Arg = Op.getOperand(5+2*i); 1954 1955 switch (Arg.getValueType()) { 1956 default: assert(0 && "Unexpected ValueType for argument!"); 1957 case MVT::i8: 1958 case MVT::i16: { 1959 // Promote the integer to 32 bits. If the input type is signed use a 1960 // sign extend, otherwise use a zero extend. 1961 unsigned ExtOp = 1962 dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ? 
1963 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1964 Arg = DAG.getNode(ExtOp, MVT::i32, Arg); 1965 } 1966 // Fallthrough 1967 1968 case MVT::i32: 1969 case MVT::f32: { 1970 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1971 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1972 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1973 ArgOffset += 4; 1974 break; 1975 } 1976 case MVT::i64: 1977 case MVT::f64: { 1978 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1979 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1980 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1981 ArgOffset += 8; 1982 break; 1983 } 1984 } 1985 } 1986 1987 if (!MemOpChains.empty()) 1988 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1989 &MemOpChains[0], MemOpChains.size()); 1990 1991 // If the callee is a GlobalAddress node (quite common, every direct call is) 1992 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1993 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1994 // We should use extra load for direct calls to dllimported functions 1995 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), true)) 1996 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1997 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1998 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1999 2000 std::vector<MVT::ValueType> NodeTys; 2001 NodeTys.push_back(MVT::Other); // Returns a chain 2002 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2003 std::vector<SDOperand> Ops; 2004 Ops.push_back(Chain); 2005 Ops.push_back(Callee); 2006 2007 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, 2008 NodeTys, &Ops[0], Ops.size()); 2009 SDOperand InFlag = Chain.getValue(1); 2010 2011 // Create the CALLSEQ_END node. 2012 unsigned NumBytesForCalleeToPush; 2013 2014 if (isVarArg) { 2015 NumBytesForCalleeToPush = 0; 2016 } else { 2017 NumBytesForCalleeToPush = NumBytes; 2018 } 2019 2020 NodeTys.clear(); 2021 NodeTys.push_back(MVT::Other); // Returns a chain 2022 if (RetVT != MVT::Other) 2023 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
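  // CALLSEQ_END takes the chain, the number of bytes the caller pushed, the
  // number of bytes the callee pops on return, and the glue from the call;
  // for stdcall the callee pops everything unless the function is variadic.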
2024 Ops.clear(); 2025 Ops.push_back(Chain); 2026 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 2027 Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy())); 2028 Ops.push_back(InFlag); 2029 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 2030 if (RetVT != MVT::Other) 2031 InFlag = Chain.getValue(1); 2032 2033 std::vector<SDOperand> ResultVals; 2034 NodeTys.clear(); 2035 switch (RetVT) { 2036 default: assert(0 && "Unknown value type to return!"); 2037 case MVT::Other: break; 2038 case MVT::i8: 2039 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 2040 ResultVals.push_back(Chain.getValue(0)); 2041 NodeTys.push_back(MVT::i8); 2042 break; 2043 case MVT::i16: 2044 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 2045 ResultVals.push_back(Chain.getValue(0)); 2046 NodeTys.push_back(MVT::i16); 2047 break; 2048 case MVT::i32: 2049 if (Op.Val->getValueType(1) == MVT::i32) { 2050 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 2051 ResultVals.push_back(Chain.getValue(0)); 2052 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32, 2053 Chain.getValue(2)).getValue(1); 2054 ResultVals.push_back(Chain.getValue(0)); 2055 NodeTys.push_back(MVT::i32); 2056 } else { 2057 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 2058 ResultVals.push_back(Chain.getValue(0)); 2059 } 2060 NodeTys.push_back(MVT::i32); 2061 break; 2062 case MVT::f32: 2063 case MVT::f64: { 2064 std::vector<MVT::ValueType> Tys; 2065 Tys.push_back(MVT::f64); 2066 Tys.push_back(MVT::Other); 2067 Tys.push_back(MVT::Flag); 2068 std::vector<SDOperand> Ops; 2069 Ops.push_back(Chain); 2070 Ops.push_back(InFlag); 2071 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, 2072 &Ops[0], Ops.size()); 2073 Chain = RetVal.getValue(1); 2074 InFlag = RetVal.getValue(2); 2075 if (X86ScalarSSE) { 2076 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 2077 // shouldn't be necessary except that RFP cannot be live across 2078 // multiple blocks. When stackifier is fixed, they can be uncoupled. 2079 MachineFunction &MF = DAG.getMachineFunction(); 2080 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 2081 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 2082 Tys.clear(); 2083 Tys.push_back(MVT::Other); 2084 Ops.clear(); 2085 Ops.push_back(Chain); 2086 Ops.push_back(RetVal); 2087 Ops.push_back(StackSlot); 2088 Ops.push_back(DAG.getValueType(RetVT)); 2089 Ops.push_back(InFlag); 2090 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 2091 RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0); 2092 Chain = RetVal.getValue(1); 2093 } 2094 2095 if (RetVT == MVT::f32 && !X86ScalarSSE) 2096 // FIXME: we would really like to remember that this FP_ROUND 2097 // operation is okay to eliminate if we allow excess FP precision. 2098 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal); 2099 ResultVals.push_back(RetVal); 2100 NodeTys.push_back(RetVT); 2101 break; 2102 } 2103 } 2104 2105 // If the function returns void, just return the chain. 2106 if (ResultVals.empty()) 2107 return Chain; 2108 2109 // Otherwise, merge everything together with a MERGE_VALUES node. 
  NodeTys.push_back(MVT::Other);
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              &ResultVals[0], ResultVals.size());
  return Res.getValue(Op.ResNo);
}

//===----------------------------------------------------------------------===//
// FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as the C calling
// convention.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//

/// HowToPassFastCallCCArgument - Returns how a formal argument of the
/// specified type should be passed. If it is passed on the stack, returns
/// the size of the stack slot; if it is passed in integer registers, returns
/// the number of integer registers needed.
static void
HowToPassFastCallCCArgument(MVT::ValueType ObjectVT,
                            unsigned NumIntRegs,
                            unsigned &ObjSize,
                            unsigned &ObjIntRegs)
{
  ObjSize = 0;
  ObjIntRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 1;
    break;
  case MVT::i16:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 2;
    break;
  case MVT::i32:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 4;
    break;
  case MVT::i64:
    if (NumIntRegs+2 <= 2) {
      ObjIntRegs = 2;
    } else if (NumIntRegs+1 <= 2) {
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
      ObjSize = 8;
    break;
  case MVT::f32:
    ObjSize = 4;
    break;
  case MVT::f64:
    ObjSize = 8;
    break;
  }
}

SDOperand
X86TargetLowering::LowerFastCallCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues()-1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
  //    ...
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither ECX nor EDX used), 1 (ECX is used) or 2 (ECX and EDX
  // are both used).
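  // For example, fastcall f(int a, int b, int c) passes a in ECX, b in EDX,
  // and c on the stack at [ESP+4].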
2201 unsigned NumIntRegs = 0; 2202 2203 for (unsigned i = 0; i < NumArgs; ++i) { 2204 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 2205 unsigned ArgIncrement = 4; 2206 unsigned ObjSize = 0; 2207 unsigned ObjIntRegs = 0; 2208 2209 HowToPassFastCallCCArgument(ObjectVT, NumIntRegs, ObjSize, ObjIntRegs); 2210 if (ObjSize > 4) 2211 ArgIncrement = ObjSize; 2212 2213 unsigned Reg = 0; 2214 SDOperand ArgValue; 2215 if (ObjIntRegs) { 2216 switch (ObjectVT) { 2217 default: assert(0 && "Unhandled argument type!"); 2218 case MVT::i8: 2219 Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::CL, 2220 X86::GR8RegisterClass); 2221 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8); 2222 break; 2223 case MVT::i16: 2224 Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::CX, 2225 X86::GR16RegisterClass); 2226 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16); 2227 break; 2228 case MVT::i32: 2229 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX, 2230 X86::GR32RegisterClass); 2231 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2232 break; 2233 case MVT::i64: 2234 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX, 2235 X86::GR32RegisterClass); 2236 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2237 if (ObjIntRegs == 2) { 2238 Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass); 2239 SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2240 ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 2241 } 2242 break; 2243 } 2244 2245 NumIntRegs += ObjIntRegs; 2246 } 2247 2248 if (ObjSize) { 2249 // Create the SelectionDAG nodes corresponding to a load from this 2250 // parameter. 2251 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 2252 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 2253 if (ObjectVT == MVT::i64 && ObjIntRegs) { 2254 SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, 2255 NULL, 0); 2256 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 2257 } else 2258 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 2259 ArgOffset += ArgIncrement; // Move on to the next argument. 2260 } 2261 2262 ArgValues.push_back(ArgValue); 2263 } 2264 2265 ArgValues.push_back(Root); 2266 2267 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 2268 // arguments and the arguments after the retaddr has been pushed are aligned. 2269 if ((ArgOffset & 7) == 0) 2270 ArgOffset += 4; 2271 2272 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. 2273 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 2274 ReturnAddrIndex = 0; // No return address slot generated yet. 2275 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments. 2276 BytesCallerReserves = 0; 2277 2278 MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn); 2279 2280 // Finally, inform the code generator which regs we return values in. 2281 switch (getValueType(MF.getFunction()->getReturnType())) { 2282 default: assert(0 && "Unknown type!"); 2283 case MVT::isVoid: break; 2284 case MVT::i1: 2285 case MVT::i8: 2286 case MVT::i16: 2287 case MVT::i32: 2288 MF.addLiveOut(X86::ECX); 2289 break; 2290 case MVT::i64: 2291 MF.addLiveOut(X86::ECX); 2292 MF.addLiveOut(X86::EDX); 2293 break; 2294 case MVT::f32: 2295 case MVT::f64: 2296 MF.addLiveOut(X86::ST0); 2297 break; 2298 } 2299 2300 // Return the new list of results. 
  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
                                     Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],
                     ArgValues.size());
}

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}

std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth)  // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address
      Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI,
                           NULL, 0);
    else
      Result = DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
                           DAG.getConstant(4, getPointerTy()));
  }
  return std::make_pair(Result, Chain);
}

/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - Return true if there is a floating point cmov for the
/// specified X86 condition code. The current x86 ISA includes the following
/// FP cmov instructions: fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova,
/// fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the half-open range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value is equal to the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
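  // For example, <2, 3, 0, 1> is a valid PSHUFD mask since every index is
  // less than 4, while <0, 1, 4, 5> is rejected because indices 4 and 5
  // would select elements of a second vector.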
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
      return false;
  }

  return true;
}

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
      return false;

  return true;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(std::vector<SDOperand> &N) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(N[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(N[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isSHUFPMask(Ops);
}

/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
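/// For example, for v4f32 the mask <0, 1, 4, 5> is accepted by SHUFPS
/// directly, while the commuted form <4, 5, 0, 1> is what this predicate
/// matches; it can then be handled by swapping the two input vectors.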
2546static bool isCommutedSHUFP(std::vector<SDOperand> &Ops) { 2547 unsigned NumElems = Ops.size(); 2548 if (NumElems != 2 && NumElems != 4) return false; 2549 2550 unsigned Half = NumElems / 2; 2551 for (unsigned i = 0; i < Half; ++i) 2552 if (!isUndefOrInRange(Ops[i], NumElems, NumElems*2)) 2553 return false; 2554 for (unsigned i = Half; i < NumElems; ++i) 2555 if (!isUndefOrInRange(Ops[i], 0, NumElems)) 2556 return false; 2557 return true; 2558} 2559 2560static bool isCommutedSHUFP(SDNode *N) { 2561 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2562 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2563 return isCommutedSHUFP(Ops); 2564} 2565 2566/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2567/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2568bool X86::isMOVHLPSMask(SDNode *N) { 2569 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2570 2571 if (N->getNumOperands() != 4) 2572 return false; 2573 2574 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2575 return isUndefOrEqual(N->getOperand(0), 6) && 2576 isUndefOrEqual(N->getOperand(1), 7) && 2577 isUndefOrEqual(N->getOperand(2), 2) && 2578 isUndefOrEqual(N->getOperand(3), 3); 2579} 2580 2581/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2582/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2583/// <2, 3, 2, 3> 2584bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2585 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2586 2587 if (N->getNumOperands() != 4) 2588 return false; 2589 2590 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2591 return isUndefOrEqual(N->getOperand(0), 2) && 2592 isUndefOrEqual(N->getOperand(1), 3) && 2593 isUndefOrEqual(N->getOperand(2), 2) && 2594 isUndefOrEqual(N->getOperand(3), 3); 2595} 2596 2597/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2598/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2599bool X86::isMOVLPMask(SDNode *N) { 2600 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2601 2602 unsigned NumElems = N->getNumOperands(); 2603 if (NumElems != 2 && NumElems != 4) 2604 return false; 2605 2606 for (unsigned i = 0; i < NumElems/2; ++i) 2607 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2608 return false; 2609 2610 for (unsigned i = NumElems/2; i < NumElems; ++i) 2611 if (!isUndefOrEqual(N->getOperand(i), i)) 2612 return false; 2613 2614 return true; 2615} 2616 2617/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2618/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2619/// and MOVLHPS. 2620bool X86::isMOVHPMask(SDNode *N) { 2621 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2622 2623 unsigned NumElems = N->getNumOperands(); 2624 if (NumElems != 2 && NumElems != 4) 2625 return false; 2626 2627 for (unsigned i = 0; i < NumElems/2; ++i) 2628 if (!isUndefOrEqual(N->getOperand(i), i)) 2629 return false; 2630 2631 for (unsigned i = 0; i < NumElems/2; ++i) { 2632 SDOperand Arg = N->getOperand(i + NumElems/2); 2633 if (!isUndefOrEqual(Arg, i + NumElems)) 2634 return false; 2635 } 2636 2637 return true; 2638} 2639 2640/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2641/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
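/// For v4f32, e.g., the unpcklps pattern is <0, 4, 1, 5>: the low halves of
/// the two input vectors are interleaved.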
static bool isUNPCKLMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N[i];
    SDOperand BitI1 = N[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElems))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElems))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isUNPCKLMask(Ops, V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N[i];
    SDOperand BitI1 = N[i+1];
    if (!isUndefOrEqual(BitI, j + NumElems/2))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElems))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElems/2 + NumElems))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isUNPCKHMask(Ops, V2IsSplat);
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(std::vector<SDOperand> &N) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  if (!isUndefOrEqual(N[0], NumElems))
    return false;

  for (unsigned i = 1; i < NumElems; ++i) {
    SDOperand Arg = N[i];
    if (!isUndefOrEqual(Arg, i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isMOVLMask(Ops);
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants. X86 movs requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1
/// in order.
static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  unsigned NumElems = Ops.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumElems; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumElems) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumElems, NumElems*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumElems))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return isCommutedMOVL(Ops, V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
2848 unsigned NumElems = N->getNumOperands(); 2849 SDOperand ElementBase; 2850 unsigned i = 0; 2851 for (; i != NumElems; ++i) { 2852 SDOperand Elt = N->getOperand(i); 2853 if (isa<ConstantSDNode>(Elt)) { 2854 ElementBase = Elt; 2855 break; 2856 } 2857 } 2858 2859 if (!ElementBase.Val) 2860 return false; 2861 2862 for (; i != NumElems; ++i) { 2863 SDOperand Arg = N->getOperand(i); 2864 if (Arg.getOpcode() == ISD::UNDEF) continue; 2865 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2866 if (Arg != ElementBase) return false; 2867 } 2868 2869 // Make sure it is a splat of the first vector operand. 2870 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 2871} 2872 2873/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2874/// a splat of a single element and it's a 2 or 4 element mask. 2875bool X86::isSplatMask(SDNode *N) { 2876 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2877 2878 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 2879 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 2880 return false; 2881 return ::isSplatMask(N); 2882} 2883 2884/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 2885/// specifies a splat of zero element. 2886bool X86::isSplatLoMask(SDNode *N) { 2887 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2888 2889 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2890 if (!isUndefOrEqual(N->getOperand(i), 0)) 2891 return false; 2892 return true; 2893} 2894 2895/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2896/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2897/// instructions. 2898unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2899 unsigned NumOperands = N->getNumOperands(); 2900 unsigned Shift = (NumOperands == 4) ? 2 : 1; 2901 unsigned Mask = 0; 2902 for (unsigned i = 0; i < NumOperands; ++i) { 2903 unsigned Val = 0; 2904 SDOperand Arg = N->getOperand(NumOperands-i-1); 2905 if (Arg.getOpcode() != ISD::UNDEF) 2906 Val = cast<ConstantSDNode>(Arg)->getValue(); 2907 if (Val >= NumOperands) Val -= NumOperands; 2908 Mask |= Val; 2909 if (i != NumOperands - 1) 2910 Mask <<= Shift; 2911 } 2912 2913 return Mask; 2914} 2915 2916/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2917/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2918/// instructions. 2919unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2920 unsigned Mask = 0; 2921 // 8 nodes, but we only care about the last 4. 2922 for (unsigned i = 7; i >= 4; --i) { 2923 unsigned Val = 0; 2924 SDOperand Arg = N->getOperand(i); 2925 if (Arg.getOpcode() != ISD::UNDEF) 2926 Val = cast<ConstantSDNode>(Arg)->getValue(); 2927 Mask |= (Val - 4); 2928 if (i != 4) 2929 Mask <<= 2; 2930 } 2931 2932 return Mask; 2933} 2934 2935/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2936/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2937/// instructions. 2938unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2939 unsigned Mask = 0; 2940 // 8 nodes, but we only care about the first 4. 
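  // For example, the mask <3, 2, 1, 0, 4, 5, 6, 7> produces the immediate
  // 0x1B (0b00011011), which reverses the four elements of the low quadword.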
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the values
/// in their permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  std::vector<SDOperand> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
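/// That is, it matches the pattern (scalar_to_vector (load X)) where the
/// load is a plain non-extending load.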
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // a load-folding shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  std::vector<SDOperand> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}

/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of the specified width.
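/// For example, NumElems == 4 yields the mask <4, 1, 2, 3>: element 0 is
/// taken from the second vector and the remaining elements from the first,
/// matching movss semantics.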
/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);

  std::vector<SDOperand> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  std::vector<SDOperand> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  unsigned Half = NumElems/2;
  std::vector<SDOperand> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");
  unsigned NumElems = getVectorNumElements(VT);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  bool isFP = MVT::isFloatingPoint(EVT);
  SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
  std::vector<SDOperand> ZeroVec(NumElems, Zero);
  return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
}

/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  Mask = getUnpacklMask(NumElems, DAG);
  while (NumElems != 4) {
    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
    NumElems >>= 1;
  }
  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);

  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
  Mask = getZeroVector(MaskVT, DAG);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}
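// For reference, the masks these helpers build for NumElems == 4:
//   getMOVLMask    -> <4, 1, 2, 3>   (element 0 taken from V2, the rest from
//                                     V1; the movss/movsd/movd pattern)
//   getUnpacklMask -> <0, 4, 1, 5>   (unpcklps)
//   getUnpackhMask -> <2, 6, 3, 7>   (unpckhps)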
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
  SDOperand Zero = DAG.getConstant(0, EVT);
  std::vector<SDOperand> MaskVec(NumElems, Zero);
  MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getConstant(i/2, TLI.getPointerTy()));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getConstant(i, TLI.getPointerTy()));
    }
  }

  return V;
}
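// Worked example for the v16i8 lowering above: bytes are inserted pairwise
// as i16 words, since there is no byte variant of pinsrw. For bytes b0 and
// b1 destined for elements 0 and 1, the inserted word is (b1 << 8) | b0,
// placed with pinsrw into word slot 0 of a v8i16, and the result is finally
// bitcast back to v16i8.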
SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor.
  if (ISD::isBuildVectorAllZeros(Op.Val))
    return Op;

  // All ones are handled with pcmpeqd.
  if (ISD::isBuildVectorAllOnes(Op.Val))
    return Op;

  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  unsigned EVTBits = MVT::getSizeInBits(EVT);

  unsigned NumElems = Op.getNumOperands();
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  std::set<SDOperand> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::UNDEF) {
      Values.insert(Elt);
      if (isZeroNode(Elt))
        NumZero++;
      else {
        NonZeros |= (1 << i);
        NumNonZero++;
      }
    }
  }

  if (NumNonZero == 0)
    // Must be a mix of zero and undef. Return a zero vector.
    return getZeroVector(VT, DAG);

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1)
    return SDOperand();

  // Special case for single non-zero element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDOperand Item = Op.getOperand(Idx);
    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
    if (Idx == 0)
      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
                                         NumZero > 0, DAG);

    if (EVTBits == 32) {
      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
                                         DAG);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
                         DAG.getNode(ISD::UNDEF, VT), Mask);
    }
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64)
    return SDOperand();

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  if (EVTBits == 16) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  std::vector<SDOperand> V(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
      default: break;
      case 0:
        V[i] = V[i*2];  // Must be a zero vector.
        break;
      case 1:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                           getMOVLMask(NumElems, DAG));
        break;
      case 2:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                           getMOVLMask(NumElems, DAG));
        break;
      case 3:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                           getUnpacklMask(NumElems, DAG));
        break;
      }
    }
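    // Worked example: for build_vector <x, 0, y, 0>, NonZeros is 0b0101.
    // The loop above forms V[0] = movl(zero, x) = <x, 0, ?, ?> and
    // V[1] = movl(zero, y) = <y, 0, ?, ?>; the shuffle built below then
    // combines them with mask <0, 1, 4, 5> into <x, 0, y, 0>.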

    // Take advantage of the fact that a GR32 to VR128 scalar_to_vector (i.e.
    // movd) clears the upper bits.
    // FIXME: we can do the same for the v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in the post-legalizer dag combiner with target specific hooks.
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
    std::vector<SDOperand> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i, EVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size());
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }

  if (Values.size() > 2) {
    // Expand into a number of unpckl*.
    // e.g. for v4f32
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;

  if (isUndefShuffle(Op.Val))
    return DAG.getNode(ISD::UNDEF, VT);

  if (isSplatMask(PermMask.Val)) {
    if (NumElems <= 4) return Op;
    // Promote it to a v4i32 splat.
    return PromoteSplat(Op, DAG);
  }
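  // A note on the MOVL check below: getMOVLMask-style masks such as
  // <4, 1, 2, 3> take element 0 from V2 and the rest from V1, which is what
  // movss/movsd/movd implement. When V1 is undef the shuffle contributes
  // nothing beyond V2 itself, so V2 is returned directly.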
  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  bool Commuted = false;
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
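    // Worked example for the v8i16 split below: a mask like
    // <1, 0, 3, 2, 5, 4, 7, 6> is not a single PSHUFLW or PSHUFHW, but it
    // can be done as a pshuflw with <1, 0, 3, 2, 4, 5, 6, 7> followed by a
    // pshufhw with <0, 1, 2, 3, 5, 4, 7, 6>.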
    // Handle v8i16 shuffles as a high / low shuffle node pair.
    if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
    }
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }
  }

  if (NumElems == 4) {
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    std::vector<std::pair<int, int> > Locs;
    Locs.resize(NumElems);
    std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
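    // Worked example of the two-shuffle strategy: for a v4f32 mask of
    // <0, 4, 2, 5>, the first shuffle gathers with <0, 2, 4, 5>, producing
    // T = <V1[0], V1[2], V2[0], V2[1]>; the second shuffle is then
    // shuffle(T, T, <0, 2, 5, 7>), which reorders T into <0, 4, 2, 5>.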
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &Mask1[0], Mask1.size()));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &Mask2[0], Mask2.size()));
    }

    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    Locs.resize(NumElems);
    std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> *MaskPtr = &LoMask;
    unsigned MaskIdx = 0;
    unsigned LoIdx = 0;
    unsigned HiIdx = NumElems/2;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (i == NumElems/2) {
        MaskPtr = &HiMask;
        MaskIdx = 1;
        LoIdx = 0;
        HiIdx = NumElems/2;
      }
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
        Locs[i] = std::make_pair(MaskIdx, LoIdx);
        (*MaskPtr)[LoIdx] = Elt;
        LoIdx++;
      } else {
        Locs[i] = std::make_pair(MaskIdx, HiIdx);
        (*MaskPtr)[HiIdx] = Elt;
        HiIdx++;
      }
    }

    SDOperand LoShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &LoMask[0], LoMask.size()));
    SDOperand HiShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &HiMask[0], HiMask.size()));
    std::vector<SDOperand> MaskOps;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Locs[i].first == -1) {
        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
      } else {
        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
      }
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskOps[0], MaskOps.size()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;
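    // For example, extracting element 2 of a v4f32: the shufps built below
    // uses mask <2, u, u, u> to move the element into slot 0, and the final
    // extract of element 0 then selects as a plain movss.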
    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  } else if (MVT::getSizeInBits(VT) == 64) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note that if the lower 64 bits of the result of the UNPCKHPD are then
    // stored to an f64mem, the whole operation is folded into a single
    // MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
  // as its second argument.
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);
  if (MVT::getSizeInBits(BaseVT) == 16) {
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  } else if (MVT::getSizeInBits(BaseVT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
    if (Idx == 0) {
      // Use a movss.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      MaskVec.push_back(DAG.getConstant(4, BaseVT));
      for (unsigned i = 1; i <= 3; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size()));
    } else {
      // Use two pinsrw instructions to insert a 32 bit value.
      Idx <<= 1;
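      // For example, inserting a value into element 2: Idx becomes 4, so the
      // low 16 bits go in with pinsrw $4 and, after a 16-bit right shift of
      // the GR32, the high 16 bits go in with pinsrw $5.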
      if (MVT::isFloatingPoint(N1.getValueType())) {
        if (ISD::isNON_EXTLoad(N1.Val)) {
          // Just load directly from f32mem to GR32.
          LoadSDNode *LD = cast<LoadSDNode>(N1);
          N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(),
                           LD->getSrcValue(), LD->getSrcValueOffset());
        } else {
          N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
          N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
          N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
                           DAG.getConstant(0, getPointerTy()));
        }
      }
      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx, getPointerTy()));
      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx+1, getPointerTy()));
      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
    }
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the abovementioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (!Subtarget->is64Bit() &&
        getTargetMachine().getRelocationModel() == Reloc::PIC_)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (!Subtarget->is64Bit() &&
        getTargetMachine().getRelocationModel() == Reloc::PIC_)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);

    // For Darwin, external and weak symbols are indirect, so we want to load
    // the value at address GV, not the value of GV itself. This means that
    // the GlobalAddress must be in the base or index register of the address,
    // not the GV offset field.
    if (getTargetMachine().getRelocationModel() != Reloc::Static &&
        Subtarget->GVRequiresExtraLoad(GV, false))
      Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);
  } else if (Subtarget->GVRequiresExtraLoad(GV, false)) {
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (!Subtarget->is64Bit() &&
        getTargetMachine().getRelocationModel() == Reloc::PIC_)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
    DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
  }
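  // How the pieces fit together, e.g. for SHL_PARTS: for shift amounts in
  // [0, 32) the result is Hi = shld(Hi, Lo), Lo = shl(Lo). Hardware shifts
  // mask the amount to 5 bits, so when bit 5 of the amount is set (amount
  // >= 32) the CMOVs below instead select Hi = shl(Lo) -- effectively
  // Lo << (amount - 32) -- and Lo = 0 (or the sign fill for SRA_PARTS).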

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                  DAG.getConstant(32, MVT::i8));
  SDOperand COps[] = { DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8) };
  SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    InFlag = Hi.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    InFlag = Lo.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
  }

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
         Op.getOperand(0).getValueType() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  SDOperand Result;
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
                                 StackSlot, NULL, 0);

  // Build the FILD.
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::f64);
  Tys.push_back(MVT::Other);
  if (X86ScalarSSE) Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                       Tys, &Ops[0], Ops.size());

  if (X86ScalarSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);
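    // The SSE path: the integer was spilled and loaded with fild onto the
    // x87 stack, since SSE has no instruction to convert these source types
    // directly (e.g. i64 in 32-bit mode). The value therefore has to be
    // stored back out and reloaded into an XMM register, which is what the
    // FST / load pair below does.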

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");
  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  switch (Op.getValueType()) {
  default: assert(0 && "Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
  if (X86ScalarSSE) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
    Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Value);
  Ops.push_back(StackSlot);
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size());
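  // FP_TO_INT*_IN_MEM is expanded elsewhere (not shown here) into the usual
  // x87 truncation idiom: save the FP control word, switch the rounding mode
  // to round-toward-zero, fistp into the stack slot, and restore the control
  // word -- fistp rounds using the current mode, so truncation has to be
  // forced explicitly.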

  // Load the result.
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}

SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(VT);
  Tys.push_back(MVT::Other);
  SmallVector<SDOperand, 3> Ops;
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}

SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(VT);
  Tys.push_back(MVT::Other);
  SmallVector<SDOperand, 3> Ops;
  Ops.push_back(DAG.getEntryNode());
  Ops.push_back(CPIdx);
  Ops.push_back(DAG.getSrcValue(NULL));
  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
  return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
}
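// The sign-mask constants above, spelled out: fabs ANDs with
// 0x7FFFFFFFFFFFFFFF (f64) or 0x7FFFFFFF (f32) to clear the sign bit, and
// fneg XORs with 0x8000000000000000 / 0x80000000 to flip it. The masks are
// materialized from the constant pool, and the FAND / FXOR nodes select to
// the packed logical ops (andpd/andps, xorpd/xorps).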

SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
                                        SDOperand Chain) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  unsigned X86CC;

  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    SDOperand Ops1[] = { Chain, Op0, Op1 };
    Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1);
    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
    return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
  }

  assert(isFP && "Illegal integer SetCC!");

  SDOperand COps[] = { Chain, Op0, Op1 };
  Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1);

  switch (SetCCOpcode) {
  default: assert(false && "Illegal floating point SetCC!");
  case ISD::SETOEQ: {  // !PF & ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8),
                         Tmp1.getValue(1) };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
  }
  case ISD::SETUNE: {  // PF | !ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8),
                         Tmp1.getValue(1) };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
  }
  }
}

SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = DAG.getEntryNode();
  SDOperand Cond = Op.getOperand(0);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons)?
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    bool IllegalFPCMov = !X86ScalarSSE &&
      MVT::isFloatingPoint(Op.getValueType()) &&
      !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
        !IllegalFPCMov) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }

  VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // the condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond.getValue(1));
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond = Op.getOperand(1);
  SDOperand Dest = Op.getOperand(2);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons)?
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Cond, Dest, CC, Cond.getValue(1));
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (!Subtarget->is64Bit() &&
        getTargetMachine().getRelocationModel() == Reloc::PIC_)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit())
    return LowerX86_64CCCCallTo(Op, DAG);
  else
    switch (CallingConv) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      if (EnableFastCC) {
        return LowerFastCCCallTo(Op, DAG, false);
      }
      // Falls through.
    case CallingConv::C:
    case CallingConv::CSRet:
      return LowerCCCCallTo(Op, DAG);
    case CallingConv::X86_StdCall:
      return LowerStdCallCCCallTo(Op, DAG);
    case CallingConv::X86_FastCall:
      return LowerFastCCCallTo(Op, DAG, true);
    }
}

SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;

  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:    // ret void.
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
  case 3: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();

    if (MVT::isVector(ArgVT) ||
        (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) {
      // Integer or FP vector result -> XMM0.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::XMM0);
      Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
                              SDOperand());
    } else if (MVT::isInteger(ArgVT)) {
      // Integer result -> EAX / RAX.
      // The C calling convention guarantees the return value has been
      // promoted to at least MVT::i32. The X86-64 ABI doesn't require the
      // value to be promoted to MVT::i64. So we don't have to extend it to
      // 64-bit. Return the value in EAX, but mark RAX as liveout.
      unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(Reg);

      Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX;
      Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1),
                              SDOperand());
    } else if (!X86ScalarSSE) {
      // FP return with fp-stack value.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::ST0);

      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Op.getOperand(0));
      Ops.push_back(Op.getOperand(1));
      Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
    } else {
      // FP return with ScalarSSE (return on fp-stack).
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::ST0);

      SDOperand MemLoc;
      SDOperand Chain = Op.getOperand(0);
      SDOperand Value = Op.getOperand(1);

      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain  = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into top of stack.
        unsigned Size = MVT::getSizeInBits(ArgVT)/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(MemLoc);
      Ops.push_back(DAG.getValueType(ArgVT));
      Copy = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
      Tys.clear();
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      Ops.clear();
      Ops.push_back(Copy.getValue(1));
      Ops.push_back(Copy);
      Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
    }
    break;
  }
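  // The 5-operand case below is a two-register return: on 32-bit targets an
  // i64 result comes back as a lo/hi pair in EAX:EDX, so both halves are
  // copied into their registers and both registers are marked live-out.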
  case 5: {
    unsigned Reg1 = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    unsigned Reg2 = Subtarget->is64Bit() ? X86::RDX : X86::EDX;
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(Reg1);
      DAG.getMachineFunction().addLiveOut(Reg2);
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), Reg2, Op.getOperand(3),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, Reg1, Op.getOperand(1), Copy.getValue(1));
    break;
  }
  }
  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
                     Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
                     Copy.getValue(1));
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygwin() &&
      Fn->getName() == "main")
    MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);

  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (Subtarget->is64Bit())
    return LowerX86_64CCCArguments(Op, DAG);
  else
    switch(CC) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      if (EnableFastCC) {
        return LowerFastCCArguments(Op, DAG);
      }
      // Falls through.
    case CallingConv::C:
    case CallingConv::CSRet:
      return LowerCCCArguments(Op, DAG);
    case CallingConv::X86_StdCall:
      MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall);
      return LowerStdCallCCArguments(Op, DAG);
    case CallingConv::X86_FastCall:
      MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall);
      return LowerFastCallCCArguments(Op, DAG);
    }
}

SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned, or if the size is below the rep;stos threshold,
  // just call memset; the library version knows how to align to the right
  // boundary first.
  if ((Align & 3) != 0 ||
      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    std::vector<std::pair<SDOperand, const Type*> > Args;
    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
    // Extend the ubyte argument to be an int value for the call.
    SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
    Args.push_back(std::make_pair(Val, IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getValue() & 255;
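    // Worked example: memset(p, 0xAB, 37) with a 4-byte aligned p gives
    // AVT = i32, EAX = 0xABABABAB (the byte replicated by the shift/or
    // sequence below), ECX = 37 / 4 = 9 for the rep;stosd, and one leftover
    // byte stored separately afterwards.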

    // If the value is a constant, then we can potentially use larger stores.
    switch (Align & 3) {
    case 2:    // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:    // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:   // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = Op.getOperand(3);
      break;
    }

    if (AVT > MVT::i8) {
      if (I) {
        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
        Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
        BytesLeft = I->getValue() % UBytes;
      } else {
        assert(AVT >= MVT::i32 &&
               "Do not use rep;stos if not at least DWORD aligned");
        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                            Op.getOperand(3),
                            DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8));
        TwoRepStos = true;
      }
    }

    Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::Other);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
    Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
                             Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys.clear();
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
  } else if (BytesLeft) {
    // Issue stores for the last 1 - 7 bytes.
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 4) {
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      Value = DAG.getConstant(Val, MVT::i32);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned, or if the size is below the rep;movs threshold,
  // just call memcpy; the library version knows how to align to the right
  // boundary first.
  if ((Align & 3) != 0 ||
      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    std::vector<std::pair<SDOperand, const Type*> > Args;
    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  unsigned BytesLeft = 0;
  bool TwoRepMovs = false;
  switch (Align & 3) {
  case 2:    // WORD aligned
    AVT = MVT::i16;
    break;
  case 0:    // DWORD aligned
    AVT = MVT::i32;
    if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
      AVT = MVT::i64;
    break;
  default:   // Byte aligned
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    break;
  }

  if (AVT > MVT::i8) {
    if (I) {
      unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
      Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
      BytesLeft = I->getValue() % UBytes;
    } else {
      assert(AVT >= MVT::i32 &&
             "Do not use rep;movs if not at least DWORD aligned");
      Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                          Op.getOperand(3),
                          DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8));
      TwoRepMovs = true;
    }
  }

  SDOperand InFlag(0, 0);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);
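  // rep;movs takes its operands implicitly: the count in ECX/RCX and the
  // destination in EDI/RDI (both set above), and the source in ESI/RSI (set
  // just below); the flag glue keeps the three register copies and the
  // REP_MOVS node together during scheduling.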

SDOperand
X86TargetLowering::LowerREADCYCLECOUNTER(SDOperand Op, SelectionDAG &DAG) {
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::Other);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Op.getOperand(0));
  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &Ops[0], Ops.size());
  Ops.clear();
  if (Subtarget->is64Bit()) {
    SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64,
                                         rd.getValue(1));
    SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX,
                                         MVT::i64, Copy1.getValue(2));
    SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2,
                                DAG.getConstant(32, MVT::i8));
    Ops.push_back(DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp));
    Ops.push_back(Copy2.getValue(1));
    Tys[0] = MVT::i64;
    Tys[1] = MVT::Other;
  } else {
    SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32,
                                         rd.getValue(1));
    SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX,
                                         MVT::i32, Copy1.getValue(2));
    Ops.push_back(Copy1);
    Ops.push_back(Copy2);
    Ops.push_back(Copy2.getValue(1));
    Tys[0] = Tys[1] = MVT::i32;
    Tys.push_back(MVT::Other);
  }
  return DAG.getNode(ISD::MERGE_VALUES, Tys, &Ops[0], Ops.size());
}
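// Note that rdtsc returns the 64-bit timestamp split across two registers:
// the low half in EAX and the high half in EDX.  The 64-bit path above
// therefore reassembles it as RAX | (RDX << 32), while the 32-bit path
// simply hands the two i32 halves back as separate merged values.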

SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1),
                        SV->getValue(), SV->getOffset());
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters coming in memory)
  //   reg_save_area
  std::vector<SDOperand> MemOps;
  SDOperand FIN = Op.getOperand(1);
  // Store gp_offset
  SDOperand Store = DAG.getStore(Op.getOperand(0),
                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
                                 FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  Store = DAG.getStore(Op.getOperand(0),
                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
                       FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(8, getPointerTy()));
  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}
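// For reference, the four stores above correspond to the SysV x86-64
// va_list element (sketched here in C):
//   struct __va_list_tag {
//     unsigned gp_offset;       // byte 0:  next GP register slot, 0..48
//     unsigned fp_offset;       // byte 4:  next XMM register slot, 48..176
//     void *overflow_arg_area;  // byte 8:  arguments passed on the stack
//     void *reg_save_area;      // byte 16: the register save area
//   };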

SDOperand
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
  switch (IntNo) {
  default: return SDOperand();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomineq_sd: {
    unsigned Opc = 0;
    ISD::CondCode CC = ISD::SETCC_INVALID;
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    unsigned X86CC;
    SDOperand LHS = Op.getOperand(1);
    SDOperand RHS = Op.getOperand(2);
    translateX86CC(CC, true, X86CC, LHS, RHS, DAG);

    const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
    SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
    SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
    VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
  }
  }
}
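// For example, the x86_sse_comieq_ss intrinsic above becomes a COMI node
// feeding an X86ISD::SETCC, which selects to something like
//   comiss %xmm1, %xmm0
//   sete %al
// with the i8 result any-extended to the i32 the intrinsic returns.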
4921/// 4922SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4923 switch (Op.getOpcode()) { 4924 default: assert(0 && "Should not custom lower this!"); 4925 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4926 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4927 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4928 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4929 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4930 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4931 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4932 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4933 case ISD::SHL_PARTS: 4934 case ISD::SRA_PARTS: 4935 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4936 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4937 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4938 case ISD::FABS: return LowerFABS(Op, DAG); 4939 case ISD::FNEG: return LowerFNEG(Op, DAG); 4940 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode()); 4941 case ISD::SELECT: return LowerSELECT(Op, DAG); 4942 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4943 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4944 case ISD::CALL: return LowerCALL(Op, DAG); 4945 case ISD::RET: return LowerRET(Op, DAG); 4946 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4947 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4948 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4949 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4950 case ISD::VASTART: return LowerVASTART(Op, DAG); 4951 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4952 } 4953} 4954 4955const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4956 switch (Opcode) { 4957 default: return NULL; 4958 case X86ISD::SHLD: return "X86ISD::SHLD"; 4959 case X86ISD::SHRD: return "X86ISD::SHRD"; 4960 case X86ISD::FAND: return "X86ISD::FAND"; 4961 case X86ISD::FXOR: return "X86ISD::FXOR"; 4962 case X86ISD::FILD: return "X86ISD::FILD"; 4963 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 4964 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 4965 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 4966 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 4967 case X86ISD::FLD: return "X86ISD::FLD"; 4968 case X86ISD::FST: return "X86ISD::FST"; 4969 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 4970 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 4971 case X86ISD::CALL: return "X86ISD::CALL"; 4972 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 4973 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 4974 case X86ISD::CMP: return "X86ISD::CMP"; 4975 case X86ISD::COMI: return "X86ISD::COMI"; 4976 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 4977 case X86ISD::SETCC: return "X86ISD::SETCC"; 4978 case X86ISD::CMOV: return "X86ISD::CMOV"; 4979 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 4980 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 4981 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 4982 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 4983 case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK"; 4984 case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA"; 4985 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 4986 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 4987 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 4988 case 

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
  // X86 allows a sign-extended 32-bit immediate field.
  return (V >= -(1LL << 31) && V <= (1LL << 31) - 1);
}

bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
  // In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement
  // field unless we are in small code model.
  if (Subtarget->is64Bit() &&
      getTargetMachine().getCodeModel() != CodeModel::Small)
    return false;
  Reloc::Model RModel = getTargetMachine().getRelocationModel();
  return (RModel == Reloc::Static) ||
         !Subtarget->GVRequiresExtraLoad(GV, false);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() <= 4 ||
          isSplatMask(Mask.Val) ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val));
}

bool
X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
                                          MVT::ValueType EVT,
                                          SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
            isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
  }
  return false;
}
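// By way of example, a v8i16 splat mask such as <0,0,0,0,0,0,0,0> and the
// v8i16 interleaving mask <0,8,1,9,2,10,3,11> (an unpcklwd pattern) are
// both legal here, while an arbitrary 8-element permutation that matches
// none of the predicates above would be rejected and expanded instead.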

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
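  // The resulting control flow for the select is a small diamond (sketch):
  //
  //          thisMBB
  //         /       \
  //        |     copy0MBB
  //         \       /
  //          sinkMBB
  //
  // where sinkMBB merges the true and false values with a PHI.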

  case X86::FP_TO_INT16_IN_MEM:
  case X86::FP_TO_INT32_IN_MEM:
  case X86::FP_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
    case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
    case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getFrameIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = 0;   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    // SETCC produces 0 or 1, so all bits above the low bit are known zero.
    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0)
      : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(),
                               DAG);
  }
  return SDOperand();
}
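// For example, asking for element 0 of
//   vector_shuffle(scalar_to_vector(x), V2, <0,1,2,3>)
// resolves to the scalar x, while any lane other than 0 read out of a
// SCALAR_TO_VECTOR operand folds to undef.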

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}
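// To make the distance check concrete: with Size = 4, a load of GV+4 is at
// Dist 1 and a load of GV+8 at Dist 2 relative to a base load of GV; frame
// objects are compared the same way through their sizes and offsets.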

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT) / 8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  if (isAlign16) {
    LoadSDNode *LD = cast<LoadSDNode>(Base);
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset());
  } else {
    // Just use movups, it's shorter.
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::v4f32);
    Tys.push_back(MVT::Other);
    SmallVector<SDOperand, 3> Ops;
    Ops.push_back(Base->getOperand(0));
    Ops.push_back(Base->getOperand(1));
    Ops.push_back(Base->getOperand(2));
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
  }
}
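// In effect, four f32 loads from p, p+4, p+8 and p+12 that are shuffled back
// together in order collapse into one 16-byte load: a normal vector load
// when the base is provably 16-byte aligned, otherwise the unaligned
// LOAD_UA form, i.e. movaps vs. movups.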

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE[12] support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE:  // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT:  // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT:  // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}

SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  case 'A':
  case 'r':
  case 'R':
  case 'l':
  case 'q':
  case 'Q':
  case 'x':
  case 'Y':
    return C_RegisterClass;
  default: return TargetLowering::getConstraintType(ConstraintLetter);
  }
}

/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand X86TargetLowering::
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
  switch (Constraint) {
  default: break;
  case 'i':
    // Literal immediates are always ok.
    if (isa<ConstantSDNode>(Op)) return Op;

    // If we are in non-pic codegen mode, we allow the address of a global to
    // be used with 'i'.
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
        return SDOperand(0, 0);

      if (GA->getOpcode() != ISD::TargetGlobalAddress)
        Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                        GA->getOffset());
      return Op;
    }

    // Otherwise, not valid for this mode.
    return SDOperand(0, 0);
  }
  return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}
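// For instance, in non-PIC mode an operand like
//   asm volatile ("pushl %0" : : "i" (&SomeGlobal));
// is accepted for 'i' because the global's address is a link-time constant,
// whereas under PIC the same operand is rejected above.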

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
                                     X86::RSI, X86::RDI, X86::RBP, X86::RSP,
                                     X86::R8,  X86::R9,  X86::R10, X86::R11,
                                     X86::R12, X86::R13, X86::R14, X86::R15, 0);
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, X86::SP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}
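// So, for example, an i8 operand constrained with 'Q' may be placed in any
// of AL, DL, CL or BL, and an 'x' operand requires SSE1 and is assigned one
// of XMM0-XMM7.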

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}