X86ISelLowering.cpp revision 2b2bc688849234b9ee5e0c8704a2984f0e9cbba3
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by Chris Lattner and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#include "X86.h" 16#include "X86InstrBuilder.h" 17#include "X86ISelLowering.h" 18#include "X86MachineFunctionInfo.h" 19#include "X86TargetMachine.h" 20#include "llvm/CallingConv.h" 21#include "llvm/Constants.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/Function.h" 24#include "llvm/Intrinsics.h" 25#include "llvm/ADT/VectorExtras.h" 26#include "llvm/Analysis/ScalarEvolutionExpressions.h" 27#include "llvm/CodeGen/MachineFrameInfo.h" 28#include "llvm/CodeGen/MachineFunction.h" 29#include "llvm/CodeGen/MachineInstrBuilder.h" 30#include "llvm/CodeGen/SelectionDAG.h" 31#include "llvm/CodeGen/SSARegMap.h" 32#include "llvm/Support/MathExtras.h" 33#include "llvm/Target/TargetOptions.h" 34#include "llvm/Support/CommandLine.h" 35#include "llvm/ADT/StringExtras.h" 36using namespace llvm; 37 38// FIXME: temporary. 39static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden, 40 cl::desc("Enable fastcc on X86")); 41X86TargetLowering::X86TargetLowering(TargetMachine &TM) 42 : TargetLowering(TM) { 43 Subtarget = &TM.getSubtarget<X86Subtarget>(); 44 X86ScalarSSE = Subtarget->hasSSE2(); 45 X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; 46 47 // Set up the TargetLowering object. 48 49 // X86 is weird, it always uses i8 for shift amounts and setcc results. 50 setShiftAmountType(MVT::i8); 51 setSetCCResultType(MVT::i8); 52 setSetCCResultContents(ZeroOrOneSetCCResult); 53 setSchedulingPreference(SchedulingForRegPressure); 54 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0 55 setStackPointerRegisterToSaveRestore(X86StackPtr); 56 57 if (Subtarget->isTargetDarwin()) { 58 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 59 setUseUnderscoreSetJmp(false); 60 setUseUnderscoreLongJmp(false); 61 } else if (Subtarget->isTargetCygwin()) { 62 // MS runtime is weird: it exports _setjmp, but longjmp! 63 setUseUnderscoreSetJmp(true); 64 setUseUnderscoreLongJmp(false); 65 } else { 66 setUseUnderscoreSetJmp(true); 67 setUseUnderscoreLongJmp(true); 68 } 69 70 // Add legal addressing mode scale values. 71 addLegalAddressScale(8); 72 addLegalAddressScale(4); 73 addLegalAddressScale(2); 74 // Enter the ones which require both scale + index last. These are more 75 // expensive. 76 addLegalAddressScale(9); 77 addLegalAddressScale(5); 78 addLegalAddressScale(3); 79 80 // Set up the register classes. 81 addRegisterClass(MVT::i8, X86::GR8RegisterClass); 82 addRegisterClass(MVT::i16, X86::GR16RegisterClass); 83 addRegisterClass(MVT::i32, X86::GR32RegisterClass); 84 if (Subtarget->is64Bit()) 85 addRegisterClass(MVT::i64, X86::GR64RegisterClass); 86 87 setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand); 88 89 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 90 // operation. 
91 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 92 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 93 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 94 95 if (Subtarget->is64Bit()) { 96 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand); 97 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 98 } else { 99 if (X86ScalarSSE) 100 // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP. 101 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand); 102 else 103 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 104 } 105 106 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 107 // this operation. 108 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 109 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 110 // SSE has no i16 to fp conversion, only i32 111 if (X86ScalarSSE) 112 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 113 else { 114 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 115 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 116 } 117 118 if (!Subtarget->is64Bit()) { 119 // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode. 120 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 121 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 122 } 123 124 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 125 // this operation. 126 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 127 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 128 129 if (X86ScalarSSE) { 130 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 131 } else { 132 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 133 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 134 } 135 136 // Handle FP_TO_UINT by promoting the destination to a larger signed 137 // conversion. 138 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 139 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 140 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 141 142 if (Subtarget->is64Bit()) { 143 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 144 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 145 } else { 146 if (X86ScalarSSE && !Subtarget->hasSSE3()) 147 // Expand FP_TO_UINT into a select. 148 // FIXME: We would like to use a Custom expander here eventually to do 149 // the optimal thing for SSE vs. the default expansion in the legalizer. 150 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 151 else 152 // With SSE3 we can use fisttpll to convert to a signed i64. 153 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 154 } 155 156 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 
157 if (!X86ScalarSSE) { 158 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand); 159 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand); 160 } 161 162 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 163 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 164 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 165 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 166 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand); 167 if (Subtarget->is64Bit()) 168 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); 169 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand); 170 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand); 171 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 172 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 173 setOperationAction(ISD::FREM , MVT::f64 , Expand); 174 175 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 176 setOperationAction(ISD::CTTZ , MVT::i8 , Expand); 177 setOperationAction(ISD::CTLZ , MVT::i8 , Expand); 178 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 179 setOperationAction(ISD::CTTZ , MVT::i16 , Expand); 180 setOperationAction(ISD::CTLZ , MVT::i16 , Expand); 181 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 182 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 183 setOperationAction(ISD::CTLZ , MVT::i32 , Expand); 184 if (Subtarget->is64Bit()) { 185 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 186 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 187 setOperationAction(ISD::CTLZ , MVT::i64 , Expand); 188 } 189 190 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 191 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 192 193 // These should be promoted to a larger select which is supported. 194 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 195 setOperationAction(ISD::SELECT , MVT::i8 , Promote); 196 // X86 wants to expand cmov itself. 197 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 198 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 199 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 200 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 201 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 202 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 203 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 204 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 205 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 206 if (Subtarget->is64Bit()) { 207 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 208 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 209 } 210 // X86 ret instruction may pop stack. 211 setOperationAction(ISD::RET , MVT::Other, Custom); 212 // Darwin ABI issue. 213 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 214 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 215 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 216 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 217 if (Subtarget->is64Bit()) { 218 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 219 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 220 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 221 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 222 } 223 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 224 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 225 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 226 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 227 // X86 wants to expand memset / memcpy itself. 
228 setOperationAction(ISD::MEMSET , MVT::Other, Custom); 229 setOperationAction(ISD::MEMCPY , MVT::Other, Custom); 230 231 // We don't have line number support yet. 232 setOperationAction(ISD::LOCATION, MVT::Other, Expand); 233 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 234 // FIXME - use subtarget debug flags 235 if (!Subtarget->isTargetDarwin() && 236 !Subtarget->isTargetELF() && 237 !Subtarget->isTargetCygwin()) 238 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand); 239 240 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 241 setOperationAction(ISD::VASTART , MVT::Other, Custom); 242 243 // Use the default implementation. 244 setOperationAction(ISD::VAARG , MVT::Other, Expand); 245 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 246 setOperationAction(ISD::VAEND , MVT::Other, Expand); 247 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 248 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 249 if (Subtarget->is64Bit()) 250 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); 251 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand); 252 253 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 254 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 255 256 if (X86ScalarSSE) { 257 // Set up the FP register classes. 258 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 259 addRegisterClass(MVT::f64, X86::FR64RegisterClass); 260 261 // Use ANDPD to simulate FABS. 262 setOperationAction(ISD::FABS , MVT::f64, Custom); 263 setOperationAction(ISD::FABS , MVT::f32, Custom); 264 265 // Use XORP to simulate FNEG. 266 setOperationAction(ISD::FNEG , MVT::f64, Custom); 267 setOperationAction(ISD::FNEG , MVT::f32, Custom); 268 269 // We don't support sin/cos/fmod 270 setOperationAction(ISD::FSIN , MVT::f64, Expand); 271 setOperationAction(ISD::FCOS , MVT::f64, Expand); 272 setOperationAction(ISD::FREM , MVT::f64, Expand); 273 setOperationAction(ISD::FSIN , MVT::f32, Expand); 274 setOperationAction(ISD::FCOS , MVT::f32, Expand); 275 setOperationAction(ISD::FREM , MVT::f32, Expand); 276 277 // Expand FP immediates into loads from the stack, except for the special 278 // cases we handle. 279 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 280 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 281 addLegalFPImmediate(+0.0); // xorps / xorpd 282 } else { 283 // Set up the FP register classes. 284 addRegisterClass(MVT::f64, X86::RFPRegisterClass); 285 286 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 287 288 if (!UnsafeFPMath) { 289 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 290 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 291 } 292 293 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 294 addLegalFPImmediate(+0.0); // FLD0 295 addLegalFPImmediate(+1.0); // FLD1 296 addLegalFPImmediate(-0.0); // FLD0/FCHS 297 addLegalFPImmediate(-1.0); // FLD1/FCHS 298 } 299 300 // First set operation action for all vector types to expand. Then we 301 // will selectively turn on ones that can be effectively codegen'd. 
302 for (unsigned VT = (unsigned)MVT::Vector + 1; 303 VT != (unsigned)MVT::LAST_VALUETYPE; VT++) { 304 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand); 305 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand); 306 setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand); 307 setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand); 308 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); 309 setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand); 310 setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); 311 setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); 312 setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); 313 setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); 314 setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); 315 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand); 316 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand); 317 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 318 setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 319 } 320 321 if (Subtarget->hasMMX()) { 322 addRegisterClass(MVT::v8i8, X86::VR64RegisterClass); 323 addRegisterClass(MVT::v4i16, X86::VR64RegisterClass); 324 addRegisterClass(MVT::v2i32, X86::VR64RegisterClass); 325 326 // FIXME: add MMX packed arithmetics 327 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand); 328 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand); 329 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand); 330 } 331 332 if (Subtarget->hasSSE1()) { 333 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); 334 335 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 336 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 337 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 338 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 339 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 340 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 341 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 342 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 343 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 344 } 345 346 if (Subtarget->hasSSE2()) { 347 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); 348 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); 349 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); 350 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); 351 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); 352 353 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 354 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 355 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 356 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 357 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 358 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 359 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 360 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 361 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 362 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 363 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 364 365 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 366 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 367 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 368 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 369 // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones. 
370 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 371 372 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 373 for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { 374 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom); 375 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom); 376 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom); 377 } 378 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 379 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 380 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 381 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 382 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 383 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 384 385 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 386 for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { 387 setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote); 388 AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64); 389 setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote); 390 AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64); 391 setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote); 392 AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64); 393 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote); 394 AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64); 395 setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); 396 AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64); 397 } 398 399 // Custom lower v2i64 and v2f64 selects. 400 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 401 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 402 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 403 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 404 } 405 406 // We want to custom lower some of our intrinsics. 407 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 408 409 // We have target-specific dag combine patterns for the following nodes: 410 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 411 setTargetDAGCombine(ISD::SELECT); 412 413 computeRegisterProperties(); 414 415 // FIXME: These should be based on subtarget info. Plus, the values should 416 // be smaller when we are in optimizing for size mode. 417 maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores 418 maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores 419 maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores 420 allowUnalignedMemoryAccesses = true; // x86 supports it! 421} 422 423//===----------------------------------------------------------------------===// 424// C Calling Convention implementation 425//===----------------------------------------------------------------------===// 426 427/// AddLiveIn - This helper function adds the specified physical register to the 428/// MachineFunction as a live in value. It also creates a corresponding virtual 429/// register for it. 430static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg, 431 TargetRegisterClass *RC) { 432 assert(RC->contains(PReg) && "Not the correct regclass!"); 433 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC); 434 MF.addLiveIn(PReg, VReg); 435 return VReg; 436} 437 438/// HowToPassCCCArgument - Returns how an formal argument of the specified type 439/// should be passed. 
If it is through stack, returns the size of the stack 440/// slot; if it is through XMM register, returns the number of XMM registers 441/// are needed. 442static void 443HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs, 444 unsigned &ObjSize, unsigned &ObjXMMRegs) { 445 ObjXMMRegs = 0; 446 447 switch (ObjectVT) { 448 default: assert(0 && "Unhandled argument type!"); 449 case MVT::i8: ObjSize = 1; break; 450 case MVT::i16: ObjSize = 2; break; 451 case MVT::i32: ObjSize = 4; break; 452 case MVT::i64: ObjSize = 8; break; 453 case MVT::f32: ObjSize = 4; break; 454 case MVT::f64: ObjSize = 8; break; 455 case MVT::v16i8: 456 case MVT::v8i16: 457 case MVT::v4i32: 458 case MVT::v2i64: 459 case MVT::v4f32: 460 case MVT::v2f64: 461 if (NumXMMRegs < 4) 462 ObjXMMRegs = 1; 463 else 464 ObjSize = 16; 465 break; 466 } 467} 468 469SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) { 470 unsigned NumArgs = Op.Val->getNumValues() - 1; 471 MachineFunction &MF = DAG.getMachineFunction(); 472 MachineFrameInfo *MFI = MF.getFrameInfo(); 473 SDOperand Root = Op.getOperand(0); 474 std::vector<SDOperand> ArgValues; 475 476 // Add DAG nodes to load the arguments... On entry to a function on the X86, 477 // the stack frame looks like this: 478 // 479 // [ESP] -- return address 480 // [ESP + 4] -- first argument (leftmost lexically) 481 // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size 482 // ... 483 // 484 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 485 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 486 static const unsigned XMMArgRegs[] = { 487 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 488 }; 489 for (unsigned i = 0; i < NumArgs; ++i) { 490 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 491 unsigned ArgIncrement = 4; 492 unsigned ObjSize = 0; 493 unsigned ObjXMMRegs = 0; 494 HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs); 495 if (ObjSize > 4) 496 ArgIncrement = ObjSize; 497 498 SDOperand ArgValue; 499 if (ObjXMMRegs) { 500 // Passed in a XMM register. 501 unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], 502 X86::VR128RegisterClass); 503 ArgValue= DAG.getCopyFromReg(Root, Reg, ObjectVT); 504 ArgValues.push_back(ArgValue); 505 NumXMMRegs += ObjXMMRegs; 506 } else { 507 // XMM arguments have to be aligned on 16-byte boundary. 508 if (ObjSize == 16) 509 ArgOffset = ((ArgOffset + 15) / 16) * 16; 510 // Create the frame index object for this incoming parameter... 511 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 512 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 513 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 514 ArgValues.push_back(ArgValue); 515 ArgOffset += ArgIncrement; // Move on to the next argument... 516 } 517 } 518 519 ArgValues.push_back(Root); 520 521 // If the function takes variable number of arguments, make a frame index for 522 // the start of the first vararg value... for expansion of llvm.va_start. 523 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 524 if (isVarArg) 525 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); 526 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 527 ReturnAddrIndex = 0; // No return address slot generated yet. 528 BytesToPopOnReturn = 0; // Callee pops nothing. 529 BytesCallerReserves = ArgOffset; 530 531 // If this is a struct return on, the callee pops the hidden struct 532 // pointer. This is common for Darwin/X86, Linux & Mingw32 targets. 
533 if (MF.getFunction()->getCallingConv() == CallingConv::CSRet) 534 BytesToPopOnReturn = 4; 535 536 // Return the new list of results. 537 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 538 Op.Val->value_end()); 539 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 540} 541 542 543SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG) { 544 SDOperand Chain = Op.getOperand(0); 545 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 546 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 547 SDOperand Callee = Op.getOperand(4); 548 MVT::ValueType RetVT= Op.Val->getValueType(0); 549 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 550 551 // Keep track of the number of XMM regs passed so far. 552 unsigned NumXMMRegs = 0; 553 static const unsigned XMMArgRegs[] = { 554 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 555 }; 556 557 // Count how many bytes are to be pushed on the stack. 558 unsigned NumBytes = 0; 559 for (unsigned i = 0; i != NumOps; ++i) { 560 SDOperand Arg = Op.getOperand(5+2*i); 561 562 switch (Arg.getValueType()) { 563 default: assert(0 && "Unexpected ValueType for argument!"); 564 case MVT::i8: 565 case MVT::i16: 566 case MVT::i32: 567 case MVT::f32: 568 NumBytes += 4; 569 break; 570 case MVT::i64: 571 case MVT::f64: 572 NumBytes += 8; 573 break; 574 case MVT::v16i8: 575 case MVT::v8i16: 576 case MVT::v4i32: 577 case MVT::v2i64: 578 case MVT::v4f32: 579 case MVT::v2f64: 580 if (NumXMMRegs < 4) 581 ++NumXMMRegs; 582 else { 583 // XMM arguments have to be aligned on 16-byte boundary. 584 NumBytes = ((NumBytes + 15) / 16) * 16; 585 NumBytes += 16; 586 } 587 break; 588 } 589 } 590 591 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 592 593 // Arguments go on the stack in reverse order, as specified by the ABI. 594 unsigned ArgOffset = 0; 595 NumXMMRegs = 0; 596 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 597 std::vector<SDOperand> MemOpChains; 598 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 599 for (unsigned i = 0; i != NumOps; ++i) { 600 SDOperand Arg = Op.getOperand(5+2*i); 601 602 switch (Arg.getValueType()) { 603 default: assert(0 && "Unexpected ValueType for argument!"); 604 case MVT::i8: 605 case MVT::i16: { 606 // Promote the integer to 32 bits. If the input type is signed use a 607 // sign extend, otherwise use a zero extend. 608 unsigned ExtOp = 609 dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ? 610 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 611 Arg = DAG.getNode(ExtOp, MVT::i32, Arg); 612 } 613 // Fallthrough 614 615 case MVT::i32: 616 case MVT::f32: { 617 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 618 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 619 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 620 ArgOffset += 4; 621 break; 622 } 623 case MVT::i64: 624 case MVT::f64: { 625 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 626 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 627 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 628 ArgOffset += 8; 629 break; 630 } 631 case MVT::v16i8: 632 case MVT::v8i16: 633 case MVT::v4i32: 634 case MVT::v2i64: 635 case MVT::v4f32: 636 case MVT::v2f64: 637 if (NumXMMRegs < 4) { 638 RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg)); 639 NumXMMRegs++; 640 } else { 641 // XMM arguments have to be aligned on 16-byte boundary. 
642 ArgOffset = ((ArgOffset + 15) / 16) * 16; 643 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 644 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 645 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 646 ArgOffset += 16; 647 } 648 } 649 } 650 651 if (!MemOpChains.empty()) 652 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 653 &MemOpChains[0], MemOpChains.size()); 654 655 // Build a sequence of copy-to-reg nodes chained together with token chain 656 // and flag operands which copy the outgoing args into registers. 657 SDOperand InFlag; 658 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 659 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 660 InFlag); 661 InFlag = Chain.getValue(1); 662 } 663 664 // If the callee is a GlobalAddress node (quite common, every direct call is) 665 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 666 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 667 // We should use extra load for direct calls to dllimported functions in 668 // non-JIT mode. 669 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 670 getTargetMachine(), true)) 671 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 672 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 673 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 674 675 std::vector<MVT::ValueType> NodeTys; 676 NodeTys.push_back(MVT::Other); // Returns a chain 677 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 678 std::vector<SDOperand> Ops; 679 Ops.push_back(Chain); 680 Ops.push_back(Callee); 681 682 // Add argument registers to the end of the list so that they are known live 683 // into the call. 684 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 685 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 686 RegsToPass[i].second.getValueType())); 687 688 if (InFlag.Val) 689 Ops.push_back(InFlag); 690 691 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, 692 NodeTys, &Ops[0], Ops.size()); 693 InFlag = Chain.getValue(1); 694 695 // Create the CALLSEQ_END node. 696 unsigned NumBytesForCalleeToPush = 0; 697 698 // If this is is a call to a struct-return function, the callee 699 // pops the hidden struct pointer, so we have to push it back. 700 // This is common for Darwin/X86, Linux & Mingw32 targets. 701 if (CallingConv == CallingConv::CSRet) 702 NumBytesForCalleeToPush = 4; 703 704 NodeTys.clear(); 705 NodeTys.push_back(MVT::Other); // Returns a chain 706 if (RetVT != MVT::Other) 707 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
708 Ops.clear(); 709 Ops.push_back(Chain); 710 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 711 Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy())); 712 Ops.push_back(InFlag); 713 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 714 if (RetVT != MVT::Other) 715 InFlag = Chain.getValue(1); 716 717 std::vector<SDOperand> ResultVals; 718 NodeTys.clear(); 719 switch (RetVT) { 720 default: assert(0 && "Unknown value type to return!"); 721 case MVT::Other: break; 722 case MVT::i8: 723 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 724 ResultVals.push_back(Chain.getValue(0)); 725 NodeTys.push_back(MVT::i8); 726 break; 727 case MVT::i16: 728 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 729 ResultVals.push_back(Chain.getValue(0)); 730 NodeTys.push_back(MVT::i16); 731 break; 732 case MVT::i32: 733 if (Op.Val->getValueType(1) == MVT::i32) { 734 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 735 ResultVals.push_back(Chain.getValue(0)); 736 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32, 737 Chain.getValue(2)).getValue(1); 738 ResultVals.push_back(Chain.getValue(0)); 739 NodeTys.push_back(MVT::i32); 740 } else { 741 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 742 ResultVals.push_back(Chain.getValue(0)); 743 } 744 NodeTys.push_back(MVT::i32); 745 break; 746 case MVT::v16i8: 747 case MVT::v8i16: 748 case MVT::v4i32: 749 case MVT::v2i64: 750 case MVT::v4f32: 751 case MVT::v2f64: 752 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1); 753 ResultVals.push_back(Chain.getValue(0)); 754 NodeTys.push_back(RetVT); 755 break; 756 case MVT::f32: 757 case MVT::f64: { 758 std::vector<MVT::ValueType> Tys; 759 Tys.push_back(MVT::f64); 760 Tys.push_back(MVT::Other); 761 Tys.push_back(MVT::Flag); 762 std::vector<SDOperand> Ops; 763 Ops.push_back(Chain); 764 Ops.push_back(InFlag); 765 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, 766 &Ops[0], Ops.size()); 767 Chain = RetVal.getValue(1); 768 InFlag = RetVal.getValue(2); 769 if (X86ScalarSSE) { 770 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 771 // shouldn't be necessary except that RFP cannot be live across 772 // multiple blocks. When stackifier is fixed, they can be uncoupled. 773 MachineFunction &MF = DAG.getMachineFunction(); 774 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 775 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 776 Tys.clear(); 777 Tys.push_back(MVT::Other); 778 Ops.clear(); 779 Ops.push_back(Chain); 780 Ops.push_back(RetVal); 781 Ops.push_back(StackSlot); 782 Ops.push_back(DAG.getValueType(RetVT)); 783 Ops.push_back(InFlag); 784 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 785 RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0); 786 Chain = RetVal.getValue(1); 787 } 788 789 if (RetVT == MVT::f32 && !X86ScalarSSE) 790 // FIXME: we would really like to remember that this FP_ROUND 791 // operation is okay to eliminate if we allow excess FP precision. 792 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal); 793 ResultVals.push_back(RetVal); 794 NodeTys.push_back(RetVT); 795 break; 796 } 797 } 798 799 // If the function returns void, just return the chain. 800 if (ResultVals.empty()) 801 return Chain; 802 803 // Otherwise, merge everything together with a MERGE_VALUES node. 
804 NodeTys.push_back(MVT::Other); 805 ResultVals.push_back(Chain); 806 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 807 &ResultVals[0], ResultVals.size()); 808 return Res.getValue(Op.ResNo); 809} 810 811 812//===----------------------------------------------------------------------===// 813// X86-64 C Calling Convention implementation 814//===----------------------------------------------------------------------===// 815 816/// HowToPassX86_64CCCArgument - Returns how an formal argument of the specified 817/// type should be passed. If it is through stack, returns the size of the stack 818/// slot; if it is through integer or XMM register, returns the number of 819/// integer or XMM registers are needed. 820static void 821HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT, 822 unsigned NumIntRegs, unsigned NumXMMRegs, 823 unsigned &ObjSize, unsigned &ObjIntRegs, 824 unsigned &ObjXMMRegs) { 825 ObjSize = 0; 826 ObjIntRegs = 0; 827 ObjXMMRegs = 0; 828 829 switch (ObjectVT) { 830 default: assert(0 && "Unhandled argument type!"); 831 case MVT::i8: 832 case MVT::i16: 833 case MVT::i32: 834 case MVT::i64: 835 if (NumIntRegs < 6) 836 ObjIntRegs = 1; 837 else { 838 switch (ObjectVT) { 839 default: break; 840 case MVT::i8: ObjSize = 1; break; 841 case MVT::i16: ObjSize = 2; break; 842 case MVT::i32: ObjSize = 4; break; 843 case MVT::i64: ObjSize = 8; break; 844 } 845 } 846 break; 847 case MVT::f32: 848 case MVT::f64: 849 case MVT::v16i8: 850 case MVT::v8i16: 851 case MVT::v4i32: 852 case MVT::v2i64: 853 case MVT::v4f32: 854 case MVT::v2f64: 855 if (NumXMMRegs < 8) 856 ObjXMMRegs = 1; 857 else { 858 switch (ObjectVT) { 859 default: break; 860 case MVT::f32: ObjSize = 4; break; 861 case MVT::f64: ObjSize = 8; break; 862 case MVT::v16i8: 863 case MVT::v8i16: 864 case MVT::v4i32: 865 case MVT::v2i64: 866 case MVT::v4f32: 867 case MVT::v2f64: ObjSize = 16; break; 868 } 869 break; 870 } 871 } 872} 873 874SDOperand 875X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) { 876 unsigned NumArgs = Op.Val->getNumValues() - 1; 877 MachineFunction &MF = DAG.getMachineFunction(); 878 MachineFrameInfo *MFI = MF.getFrameInfo(); 879 SDOperand Root = Op.getOperand(0); 880 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 881 std::vector<SDOperand> ArgValues; 882 883 // Add DAG nodes to load the arguments... On entry to a function on the X86, 884 // the stack frame looks like this: 885 // 886 // [RSP] -- return address 887 // [RSP + 8] -- first nonreg argument (leftmost lexically) 888 // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size 889 // ... 890 // 891 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 892 unsigned NumIntRegs = 0; // Int regs used for parameter passing. 893 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 
894 895 static const unsigned GPR8ArgRegs[] = { 896 X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B 897 }; 898 static const unsigned GPR16ArgRegs[] = { 899 X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W 900 }; 901 static const unsigned GPR32ArgRegs[] = { 902 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D 903 }; 904 static const unsigned GPR64ArgRegs[] = { 905 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 906 }; 907 static const unsigned XMMArgRegs[] = { 908 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 909 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 910 }; 911 912 for (unsigned i = 0; i < NumArgs; ++i) { 913 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 914 unsigned ArgIncrement = 8; 915 unsigned ObjSize = 0; 916 unsigned ObjIntRegs = 0; 917 unsigned ObjXMMRegs = 0; 918 919 // FIXME: __int128 and long double support? 920 HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs, 921 ObjSize, ObjIntRegs, ObjXMMRegs); 922 if (ObjSize > 8) 923 ArgIncrement = ObjSize; 924 925 unsigned Reg = 0; 926 SDOperand ArgValue; 927 if (ObjIntRegs || ObjXMMRegs) { 928 switch (ObjectVT) { 929 default: assert(0 && "Unhandled argument type!"); 930 case MVT::i8: 931 case MVT::i16: 932 case MVT::i32: 933 case MVT::i64: { 934 TargetRegisterClass *RC = NULL; 935 switch (ObjectVT) { 936 default: break; 937 case MVT::i8: 938 RC = X86::GR8RegisterClass; 939 Reg = GPR8ArgRegs[NumIntRegs]; 940 break; 941 case MVT::i16: 942 RC = X86::GR16RegisterClass; 943 Reg = GPR16ArgRegs[NumIntRegs]; 944 break; 945 case MVT::i32: 946 RC = X86::GR32RegisterClass; 947 Reg = GPR32ArgRegs[NumIntRegs]; 948 break; 949 case MVT::i64: 950 RC = X86::GR64RegisterClass; 951 Reg = GPR64ArgRegs[NumIntRegs]; 952 break; 953 } 954 Reg = AddLiveIn(MF, Reg, RC); 955 ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT); 956 break; 957 } 958 case MVT::f32: 959 case MVT::f64: 960 case MVT::v16i8: 961 case MVT::v8i16: 962 case MVT::v4i32: 963 case MVT::v2i64: 964 case MVT::v4f32: 965 case MVT::v2f64: { 966 TargetRegisterClass *RC= (ObjectVT == MVT::f32) ? 967 X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ? 968 X86::FR64RegisterClass : X86::VR128RegisterClass); 969 Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC); 970 ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT); 971 break; 972 } 973 } 974 NumIntRegs += ObjIntRegs; 975 NumXMMRegs += ObjXMMRegs; 976 } else if (ObjSize) { 977 // XMM arguments have to be aligned on 16-byte boundary. 978 if (ObjSize == 16) 979 ArgOffset = ((ArgOffset + 15) / 16) * 16; 980 // Create the SelectionDAG nodes corresponding to a load from this 981 // parameter. 982 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 983 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 984 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 985 ArgOffset += ArgIncrement; // Move on to the next argument. 986 } 987 988 ArgValues.push_back(ArgValue); 989 } 990 991 // If the function takes variable number of arguments, make a frame index for 992 // the start of the first vararg value... for expansion of llvm.va_start. 993 if (isVarArg) { 994 // For X86-64, if there are vararg parameters that are passed via 995 // registers, then we must store them to their spots on the stack so they 996 // may be loaded by deferencing the result of va_next. 
997 VarArgsGPOffset = NumIntRegs * 8; 998 VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16; 999 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); 1000 RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16); 1001 1002 // Store the integer parameter registers. 1003 std::vector<SDOperand> MemOps; 1004 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 1005 SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1006 DAG.getConstant(VarArgsGPOffset, getPointerTy())); 1007 for (; NumIntRegs != 6; ++NumIntRegs) { 1008 unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], 1009 X86::GR64RegisterClass); 1010 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1011 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1012 MemOps.push_back(Store); 1013 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1014 DAG.getConstant(8, getPointerTy())); 1015 } 1016 1017 // Now store the XMM (fp + vector) parameter registers. 1018 FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1019 DAG.getConstant(VarArgsFPOffset, getPointerTy())); 1020 for (; NumXMMRegs != 8; ++NumXMMRegs) { 1021 unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], 1022 X86::VR128RegisterClass); 1023 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); 1024 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1025 MemOps.push_back(Store); 1026 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1027 DAG.getConstant(16, getPointerTy())); 1028 } 1029 if (!MemOps.empty()) 1030 Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 1031 &MemOps[0], MemOps.size()); 1032 } 1033 1034 ArgValues.push_back(Root); 1035 1036 ReturnAddrIndex = 0; // No return address slot generated yet. 1037 BytesToPopOnReturn = 0; // Callee pops nothing. 1038 BytesCallerReserves = ArgOffset; 1039 1040 // Return the new list of results. 1041 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 1042 Op.Val->value_end()); 1043 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 1044} 1045 1046SDOperand 1047X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) { 1048 SDOperand Chain = Op.getOperand(0); 1049 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1050 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1051 SDOperand Callee = Op.getOperand(4); 1052 MVT::ValueType RetVT= Op.Val->getValueType(0); 1053 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1054 1055 // Count how many bytes are to be pushed on the stack. 1056 unsigned NumBytes = 0; 1057 unsigned NumIntRegs = 0; // Int regs used for parameter passing. 1058 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 
1059 1060 static const unsigned GPR8ArgRegs[] = { 1061 X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B 1062 }; 1063 static const unsigned GPR16ArgRegs[] = { 1064 X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W 1065 }; 1066 static const unsigned GPR32ArgRegs[] = { 1067 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D 1068 }; 1069 static const unsigned GPR64ArgRegs[] = { 1070 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1071 }; 1072 static const unsigned XMMArgRegs[] = { 1073 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1074 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1075 }; 1076 1077 for (unsigned i = 0; i != NumOps; ++i) { 1078 SDOperand Arg = Op.getOperand(5+2*i); 1079 MVT::ValueType ArgVT = Arg.getValueType(); 1080 1081 switch (ArgVT) { 1082 default: assert(0 && "Unknown value type!"); 1083 case MVT::i8: 1084 case MVT::i16: 1085 case MVT::i32: 1086 case MVT::i64: 1087 if (NumIntRegs < 6) 1088 ++NumIntRegs; 1089 else 1090 NumBytes += 8; 1091 break; 1092 case MVT::f32: 1093 case MVT::f64: 1094 case MVT::v16i8: 1095 case MVT::v8i16: 1096 case MVT::v4i32: 1097 case MVT::v2i64: 1098 case MVT::v4f32: 1099 case MVT::v2f64: 1100 if (NumXMMRegs < 8) 1101 NumXMMRegs++; 1102 else if (ArgVT == MVT::f32 || ArgVT == MVT::f64) 1103 NumBytes += 8; 1104 else { 1105 // XMM arguments have to be aligned on 16-byte boundary. 1106 NumBytes = ((NumBytes + 15) / 16) * 16; 1107 NumBytes += 16; 1108 } 1109 break; 1110 } 1111 } 1112 1113 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1114 1115 // Arguments go on the stack in reverse order, as specified by the ABI. 1116 unsigned ArgOffset = 0; 1117 NumIntRegs = 0; 1118 NumXMMRegs = 0; 1119 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1120 std::vector<SDOperand> MemOpChains; 1121 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 1122 for (unsigned i = 0; i != NumOps; ++i) { 1123 SDOperand Arg = Op.getOperand(5+2*i); 1124 MVT::ValueType ArgVT = Arg.getValueType(); 1125 1126 switch (ArgVT) { 1127 default: assert(0 && "Unexpected ValueType for argument!"); 1128 case MVT::i8: 1129 case MVT::i16: 1130 case MVT::i32: 1131 case MVT::i64: 1132 if (NumIntRegs < 6) { 1133 unsigned Reg = 0; 1134 switch (ArgVT) { 1135 default: break; 1136 case MVT::i8: Reg = GPR8ArgRegs[NumIntRegs]; break; 1137 case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break; 1138 case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break; 1139 case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break; 1140 } 1141 RegsToPass.push_back(std::make_pair(Reg, Arg)); 1142 ++NumIntRegs; 1143 } else { 1144 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1145 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1146 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1147 ArgOffset += 8; 1148 } 1149 break; 1150 case MVT::f32: 1151 case MVT::f64: 1152 case MVT::v16i8: 1153 case MVT::v8i16: 1154 case MVT::v4i32: 1155 case MVT::v2i64: 1156 case MVT::v4f32: 1157 case MVT::v2f64: 1158 if (NumXMMRegs < 8) { 1159 RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg)); 1160 NumXMMRegs++; 1161 } else { 1162 if (ArgVT != MVT::f32 && ArgVT != MVT::f64) { 1163 // XMM arguments have to be aligned on 16-byte boundary. 
1164 ArgOffset = ((ArgOffset + 15) / 16) * 16; 1165 } 1166 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1167 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1168 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1169 if (ArgVT == MVT::f32 || ArgVT == MVT::f64) 1170 ArgOffset += 8; 1171 else 1172 ArgOffset += 16; 1173 } 1174 } 1175 } 1176 1177 if (!MemOpChains.empty()) 1178 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1179 &MemOpChains[0], MemOpChains.size()); 1180 1181 // Build a sequence of copy-to-reg nodes chained together with token chain 1182 // and flag operands which copy the outgoing args into registers. 1183 SDOperand InFlag; 1184 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1185 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1186 InFlag); 1187 InFlag = Chain.getValue(1); 1188 } 1189 1190 if (isVarArg) { 1191 // From AMD64 ABI document: 1192 // For calls that may call functions that use varargs or stdargs 1193 // (prototype-less calls or calls to functions containing ellipsis (...) in 1194 // the declaration) %al is used as hidden argument to specify the number 1195 // of SSE registers used. The contents of %al do not need to match exactly 1196 // the number of registers, but must be an ubound on the number of SSE 1197 // registers used and is in the range 0 - 8 inclusive. 1198 Chain = DAG.getCopyToReg(Chain, X86::AL, 1199 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1200 InFlag = Chain.getValue(1); 1201 } 1202 1203 // If the callee is a GlobalAddress node (quite common, every direct call is) 1204 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1205 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1206 // We should use extra load for direct calls to dllimported functions in 1207 // non-JIT mode. 1208 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1209 getTargetMachine(), true)) 1210 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1211 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1212 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1213 1214 std::vector<MVT::ValueType> NodeTys; 1215 NodeTys.push_back(MVT::Other); // Returns a chain 1216 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1217 std::vector<SDOperand> Ops; 1218 Ops.push_back(Chain); 1219 Ops.push_back(Callee); 1220 1221 // Add argument registers to the end of the list so that they are known live 1222 // into the call. 1223 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1224 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1225 RegsToPass[i].second.getValueType())); 1226 1227 if (InFlag.Val) 1228 Ops.push_back(InFlag); 1229 1230 // FIXME: Do not generate X86ISD::TAILCALL for now. 1231 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, 1232 NodeTys, &Ops[0], Ops.size()); 1233 InFlag = Chain.getValue(1); 1234 1235 NodeTys.clear(); 1236 NodeTys.push_back(MVT::Other); // Returns a chain 1237 if (RetVT != MVT::Other) 1238 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
1239 Ops.clear(); 1240 Ops.push_back(Chain); 1241 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1242 Ops.push_back(DAG.getConstant(0, getPointerTy())); 1243 Ops.push_back(InFlag); 1244 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1245 if (RetVT != MVT::Other) 1246 InFlag = Chain.getValue(1); 1247 1248 std::vector<SDOperand> ResultVals; 1249 NodeTys.clear(); 1250 switch (RetVT) { 1251 default: assert(0 && "Unknown value type to return!"); 1252 case MVT::Other: break; 1253 case MVT::i8: 1254 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 1255 ResultVals.push_back(Chain.getValue(0)); 1256 NodeTys.push_back(MVT::i8); 1257 break; 1258 case MVT::i16: 1259 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 1260 ResultVals.push_back(Chain.getValue(0)); 1261 NodeTys.push_back(MVT::i16); 1262 break; 1263 case MVT::i32: 1264 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 1265 ResultVals.push_back(Chain.getValue(0)); 1266 NodeTys.push_back(MVT::i32); 1267 break; 1268 case MVT::i64: 1269 if (Op.Val->getValueType(1) == MVT::i64) { 1270 // FIXME: __int128 support? 1271 Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1); 1272 ResultVals.push_back(Chain.getValue(0)); 1273 Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64, 1274 Chain.getValue(2)).getValue(1); 1275 ResultVals.push_back(Chain.getValue(0)); 1276 NodeTys.push_back(MVT::i64); 1277 } else { 1278 Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1); 1279 ResultVals.push_back(Chain.getValue(0)); 1280 } 1281 NodeTys.push_back(MVT::i64); 1282 break; 1283 case MVT::f32: 1284 case MVT::f64: 1285 case MVT::v16i8: 1286 case MVT::v8i16: 1287 case MVT::v4i32: 1288 case MVT::v2i64: 1289 case MVT::v4f32: 1290 case MVT::v2f64: 1291 // FIXME: long double support? 1292 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1); 1293 ResultVals.push_back(Chain.getValue(0)); 1294 NodeTys.push_back(RetVT); 1295 break; 1296 } 1297 1298 // If the function returns void, just return the chain. 1299 if (ResultVals.empty()) 1300 return Chain; 1301 1302 // Otherwise, merge everything together with a MERGE_VALUES node. 1303 NodeTys.push_back(MVT::Other); 1304 ResultVals.push_back(Chain); 1305 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 1306 &ResultVals[0], ResultVals.size()); 1307 return Res.getValue(Op.ResNo); 1308} 1309 1310//===----------------------------------------------------------------------===// 1311// Fast Calling Convention implementation 1312//===----------------------------------------------------------------------===// 1313// 1314// The X86 'fast' calling convention passes up to two integer arguments in 1315// registers (an appropriate portion of EAX/EDX), passes arguments in C order, 1316// and requires that the callee pop its arguments off the stack (allowing proper 1317// tail calls), and has the same return value conventions as C calling convs. 1318// 1319// This calling convention always arranges for the callee pop value to be 8n+4 1320// bytes, which is needed for tail recursion elimination and stack alignment 1321// reasons. 1322// 1323// Note that this can be enhanced in the future to pass fp vals in registers 1324// (when we have a global fp allocator) and do other tricks. 1325// 1326 1327/// HowToPassFastCCArgument - Returns how an formal argument of the specified 1328/// type should be passed. 
If it is through stack, returns the size of the stack 1329/// slot; if it is through integer or XMM register, returns the number of 1330/// integer or XMM registers are needed. 1331static void 1332HowToPassFastCCArgument(MVT::ValueType ObjectVT, 1333 unsigned NumIntRegs, unsigned NumXMMRegs, 1334 unsigned &ObjSize, unsigned &ObjIntRegs, 1335 unsigned &ObjXMMRegs) { 1336 ObjSize = 0; 1337 ObjIntRegs = 0; 1338 ObjXMMRegs = 0; 1339 1340 switch (ObjectVT) { 1341 default: assert(0 && "Unhandled argument type!"); 1342 case MVT::i8: 1343#if FASTCC_NUM_INT_ARGS_INREGS > 0 1344 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) 1345 ObjIntRegs = 1; 1346 else 1347#endif 1348 ObjSize = 1; 1349 break; 1350 case MVT::i16: 1351#if FASTCC_NUM_INT_ARGS_INREGS > 0 1352 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) 1353 ObjIntRegs = 1; 1354 else 1355#endif 1356 ObjSize = 2; 1357 break; 1358 case MVT::i32: 1359#if FASTCC_NUM_INT_ARGS_INREGS > 0 1360 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) 1361 ObjIntRegs = 1; 1362 else 1363#endif 1364 ObjSize = 4; 1365 break; 1366 case MVT::i64: 1367#if FASTCC_NUM_INT_ARGS_INREGS > 0 1368 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) { 1369 ObjIntRegs = 2; 1370 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) { 1371 ObjIntRegs = 1; 1372 ObjSize = 4; 1373 } else 1374#endif 1375 ObjSize = 8; 1376 case MVT::f32: 1377 ObjSize = 4; 1378 break; 1379 case MVT::f64: 1380 ObjSize = 8; 1381 break; 1382 case MVT::v16i8: 1383 case MVT::v8i16: 1384 case MVT::v4i32: 1385 case MVT::v2i64: 1386 case MVT::v4f32: 1387 case MVT::v2f64: 1388 if (NumXMMRegs < 4) 1389 ObjXMMRegs = 1; 1390 else 1391 ObjSize = 16; 1392 break; 1393 } 1394} 1395 1396SDOperand 1397X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) { 1398 unsigned NumArgs = Op.Val->getNumValues()-1; 1399 MachineFunction &MF = DAG.getMachineFunction(); 1400 MachineFrameInfo *MFI = MF.getFrameInfo(); 1401 SDOperand Root = Op.getOperand(0); 1402 std::vector<SDOperand> ArgValues; 1403 1404 // Add DAG nodes to load the arguments... On entry to a function the stack 1405 // frame looks like this: 1406 // 1407 // [ESP] -- return address 1408 // [ESP + 4] -- first nonreg argument (leftmost lexically) 1409 // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size 1410 // ... 1411 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 1412 1413 // Keep track of the number of integer regs passed so far. This can be either 1414 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both 1415 // used). 1416 unsigned NumIntRegs = 0; 1417 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 1418 1419 static const unsigned XMMArgRegs[] = { 1420 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 1421 }; 1422 1423 for (unsigned i = 0; i < NumArgs; ++i) { 1424 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 1425 unsigned ArgIncrement = 4; 1426 unsigned ObjSize = 0; 1427 unsigned ObjIntRegs = 0; 1428 unsigned ObjXMMRegs = 0; 1429 1430 HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs, 1431 ObjSize, ObjIntRegs, ObjXMMRegs); 1432 if (ObjSize > 4) 1433 ArgIncrement = ObjSize; 1434 1435 unsigned Reg = 0; 1436 SDOperand ArgValue; 1437 if (ObjIntRegs || ObjXMMRegs) { 1438 switch (ObjectVT) { 1439 default: assert(0 && "Unhandled argument type!"); 1440 case MVT::i8: 1441 Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL, 1442 X86::GR8RegisterClass); 1443 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8); 1444 break; 1445 case MVT::i16: 1446 Reg = AddLiveIn(MF, NumIntRegs ? 
X86::DX : X86::AX, 1447 X86::GR16RegisterClass); 1448 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16); 1449 break; 1450 case MVT::i32: 1451 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX, 1452 X86::GR32RegisterClass); 1453 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 1454 break; 1455 case MVT::i64: 1456 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX, 1457 X86::GR32RegisterClass); 1458 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 1459 if (ObjIntRegs == 2) { 1460 Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass); 1461 SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32); 1462 ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 1463 } 1464 break; 1465 case MVT::v16i8: 1466 case MVT::v8i16: 1467 case MVT::v4i32: 1468 case MVT::v2i64: 1469 case MVT::v4f32: 1470 case MVT::v2f64: 1471 Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass); 1472 ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT); 1473 break; 1474 } 1475 NumIntRegs += ObjIntRegs; 1476 NumXMMRegs += ObjXMMRegs; 1477 } 1478 1479 if (ObjSize) { 1480 // XMM arguments have to be aligned on 16-byte boundary. 1481 if (ObjSize == 16) 1482 ArgOffset = ((ArgOffset + 15) / 16) * 16; 1483 // Create the SelectionDAG nodes corresponding to a load from this 1484 // parameter. 1485 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 1486 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 1487 if (ObjectVT == MVT::i64 && ObjIntRegs) { 1488 SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, 1489 NULL, 0); 1490 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 1491 } else 1492 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 1493 ArgOffset += ArgIncrement; // Move on to the next argument. 1494 } 1495 1496 ArgValues.push_back(ArgValue); 1497 } 1498 1499 ArgValues.push_back(Root); 1500 1501 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1502 // arguments and the arguments after the retaddr has been pushed are aligned. 1503 if ((ArgOffset & 7) == 0) 1504 ArgOffset += 4; 1505 1506 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. 1507 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 1508 ReturnAddrIndex = 0; // No return address slot generated yet. 1509 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments. 1510 BytesCallerReserves = 0; 1511 1512 // Finally, inform the code generator which regs we return values in. 1513 switch (getValueType(MF.getFunction()->getReturnType())) { 1514 default: assert(0 && "Unknown type!"); 1515 case MVT::isVoid: break; 1516 case MVT::i1: 1517 case MVT::i8: 1518 case MVT::i16: 1519 case MVT::i32: 1520 MF.addLiveOut(X86::EAX); 1521 break; 1522 case MVT::i64: 1523 MF.addLiveOut(X86::EAX); 1524 MF.addLiveOut(X86::EDX); 1525 break; 1526 case MVT::f32: 1527 case MVT::f64: 1528 MF.addLiveOut(X86::ST0); 1529 break; 1530 case MVT::v16i8: 1531 case MVT::v8i16: 1532 case MVT::v4i32: 1533 case MVT::v2i64: 1534 case MVT::v4f32: 1535 case MVT::v2f64: 1536 MF.addLiveOut(X86::XMM0); 1537 break; 1538 } 1539 1540 // Return the new list of results. 
1541 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 1542 Op.Val->value_end()); 1543 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 1544} 1545 1546SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, 1547 bool isFastCall) { 1548 SDOperand Chain = Op.getOperand(0); 1549 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1550 SDOperand Callee = Op.getOperand(4); 1551 MVT::ValueType RetVT= Op.Val->getValueType(0); 1552 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1553 1554 // Count how many bytes are to be pushed on the stack. 1555 unsigned NumBytes = 0; 1556 1557 // Keep track of the number of integer regs passed so far. This can be either 1558 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both 1559 // used). 1560 unsigned NumIntRegs = 0; 1561 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing. 1562 1563 static const unsigned GPRArgRegs[][2] = { 1564 { X86::AL, X86::DL }, 1565 { X86::AX, X86::DX }, 1566 { X86::EAX, X86::EDX } 1567 }; 1568#if 0 1569 static const unsigned FastCallGPRArgRegs[][2] = { 1570 { X86::CL, X86::DL }, 1571 { X86::CX, X86::DX }, 1572 { X86::ECX, X86::EDX } 1573 }; 1574#endif 1575 static const unsigned XMMArgRegs[] = { 1576 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3 1577 }; 1578 1579 for (unsigned i = 0; i != NumOps; ++i) { 1580 SDOperand Arg = Op.getOperand(5+2*i); 1581 1582 switch (Arg.getValueType()) { 1583 default: assert(0 && "Unknown value type!"); 1584 case MVT::i8: 1585 case MVT::i16: 1586 case MVT::i32: { 1587 unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS); 1588 if (NumIntRegs < MaxNumIntRegs) { 1589 ++NumIntRegs; 1590 break; 1591 } 1592 } // Fall through 1593 case MVT::f32: 1594 NumBytes += 4; 1595 break; 1596 case MVT::f64: 1597 NumBytes += 8; 1598 break; 1599 case MVT::v16i8: 1600 case MVT::v8i16: 1601 case MVT::v4i32: 1602 case MVT::v2i64: 1603 case MVT::v4f32: 1604 case MVT::v2f64: 1605 if (isFastCall) { 1606 assert(0 && "Unknown value type!"); 1607 } else { 1608 if (NumXMMRegs < 4) 1609 NumXMMRegs++; 1610 else { 1611 // XMM arguments have to be aligned on 16-byte boundary. 1612 NumBytes = ((NumBytes + 15) / 16) * 16; 1613 NumBytes += 16; 1614 } 1615 } 1616 break; 1617 } 1618 } 1619 1620 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1621 // arguments and the arguments after the retaddr has been pushed are aligned. 1622 if ((NumBytes & 7) == 0) 1623 NumBytes += 4; 1624 1625 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1626 1627 // Arguments go on the stack in reverse order, as specified by the ABI. 1628 unsigned ArgOffset = 0; 1629 NumIntRegs = 0; 1630 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1631 std::vector<SDOperand> MemOpChains; 1632 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 1633 for (unsigned i = 0; i != NumOps; ++i) { 1634 SDOperand Arg = Op.getOperand(5+2*i); 1635 1636 switch (Arg.getValueType()) { 1637 default: assert(0 && "Unexpected ValueType for argument!"); 1638 case MVT::i8: 1639 case MVT::i16: 1640 case MVT::i32: { 1641 unsigned MaxNumIntRegs = (isFastCall ? 
2 : FASTCC_NUM_INT_ARGS_INREGS); 1642 if (NumIntRegs < MaxNumIntRegs) { 1643 RegsToPass.push_back( 1644 std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs], 1645 Arg)); 1646 ++NumIntRegs; 1647 break; 1648 } 1649 } // Fall through 1650 case MVT::f32: { 1651 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1652 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1653 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1654 ArgOffset += 4; 1655 break; 1656 } 1657 case MVT::f64: { 1658 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1659 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1660 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1661 ArgOffset += 8; 1662 break; 1663 } 1664 case MVT::v16i8: 1665 case MVT::v8i16: 1666 case MVT::v4i32: 1667 case MVT::v2i64: 1668 case MVT::v4f32: 1669 case MVT::v2f64: 1670 if (isFastCall) { 1671 assert(0 && "Unexpected ValueType for argument!"); 1672 } else { 1673 if (NumXMMRegs < 4) { 1674 RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg)); 1675 NumXMMRegs++; 1676 } else { 1677 // XMM arguments have to be aligned on 16-byte boundary. 1678 ArgOffset = ((ArgOffset + 15) / 16) * 16; 1679 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1680 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1681 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1682 ArgOffset += 16; 1683 } 1684 } 1685 break; 1686 } 1687 } 1688 1689 if (!MemOpChains.empty()) 1690 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1691 &MemOpChains[0], MemOpChains.size()); 1692 1693 // Build a sequence of copy-to-reg nodes chained together with token chain 1694 // and flag operands which copy the outgoing args into registers. 1695 SDOperand InFlag; 1696 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1697 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1698 InFlag); 1699 InFlag = Chain.getValue(1); 1700 } 1701 1702 // If the callee is a GlobalAddress node (quite common, every direct call is) 1703 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1704 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1705 // We should use extra load for direct calls to dllimported functions in 1706 // non-JIT mode. 1707 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1708 getTargetMachine(), true)) 1709 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1710 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1711 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1712 1713 std::vector<MVT::ValueType> NodeTys; 1714 NodeTys.push_back(MVT::Other); // Returns a chain 1715 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1716 std::vector<SDOperand> Ops; 1717 Ops.push_back(Chain); 1718 Ops.push_back(Callee); 1719 1720 // Add argument registers to the end of the list so that they are known live 1721 // into the call. 1722 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1723 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1724 RegsToPass[i].second.getValueType())); 1725 1726 if (InFlag.Val) 1727 Ops.push_back(InFlag); 1728 1729 // FIXME: Do not generate X86ISD::TAILCALL for now. 1730 Chain = DAG.getNode(isTailCall ? 
X86ISD::TAILCALL : X86ISD::CALL, 1731 NodeTys, &Ops[0], Ops.size()); 1732 InFlag = Chain.getValue(1); 1733 1734 NodeTys.clear(); 1735 NodeTys.push_back(MVT::Other); // Returns a chain 1736 if (RetVT != MVT::Other) 1737 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1738 Ops.clear(); 1739 Ops.push_back(Chain); 1740 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1741 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1742 Ops.push_back(InFlag); 1743 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1744 if (RetVT != MVT::Other) 1745 InFlag = Chain.getValue(1); 1746 1747 std::vector<SDOperand> ResultVals; 1748 NodeTys.clear(); 1749 switch (RetVT) { 1750 default: assert(0 && "Unknown value type to return!"); 1751 case MVT::Other: break; 1752 case MVT::i8: 1753 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 1754 ResultVals.push_back(Chain.getValue(0)); 1755 NodeTys.push_back(MVT::i8); 1756 break; 1757 case MVT::i16: 1758 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 1759 ResultVals.push_back(Chain.getValue(0)); 1760 NodeTys.push_back(MVT::i16); 1761 break; 1762 case MVT::i32: 1763 if (Op.Val->getValueType(1) == MVT::i32) { 1764 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 1765 ResultVals.push_back(Chain.getValue(0)); 1766 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32, 1767 Chain.getValue(2)).getValue(1); 1768 ResultVals.push_back(Chain.getValue(0)); 1769 NodeTys.push_back(MVT::i32); 1770 } else { 1771 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 1772 ResultVals.push_back(Chain.getValue(0)); 1773 } 1774 NodeTys.push_back(MVT::i32); 1775 break; 1776 case MVT::v16i8: 1777 case MVT::v8i16: 1778 case MVT::v4i32: 1779 case MVT::v2i64: 1780 case MVT::v4f32: 1781 case MVT::v2f64: 1782 if (isFastCall) { 1783 assert(0 && "Unknown value type to return!"); 1784 } else { 1785 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1); 1786 ResultVals.push_back(Chain.getValue(0)); 1787 NodeTys.push_back(RetVT); 1788 } 1789 break; 1790 case MVT::f32: 1791 case MVT::f64: { 1792 std::vector<MVT::ValueType> Tys; 1793 Tys.push_back(MVT::f64); 1794 Tys.push_back(MVT::Other); 1795 Tys.push_back(MVT::Flag); 1796 std::vector<SDOperand> Ops; 1797 Ops.push_back(Chain); 1798 Ops.push_back(InFlag); 1799 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, 1800 &Ops[0], Ops.size()); 1801 Chain = RetVal.getValue(1); 1802 InFlag = RetVal.getValue(2); 1803 if (X86ScalarSSE) { 1804 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 1805 // shouldn't be necessary except that RFP cannot be live across 1806 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
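      // [Annotation, not part of the original source:] with scalar SSE enabled
      // the FP return value still arrives on the x87 stack in ST(0), so the
      // code below spills it through an X86ISD::FST into a fresh 8-byte stack
      // slot and immediately reloads it as a value of type RetVT.  The same
      // x87-to-SSE handoff appears again in the StdCall call lowering later in
      // this file.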
1807 MachineFunction &MF = DAG.getMachineFunction(); 1808 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 1809 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 1810 Tys.clear(); 1811 Tys.push_back(MVT::Other); 1812 Ops.clear(); 1813 Ops.push_back(Chain); 1814 Ops.push_back(RetVal); 1815 Ops.push_back(StackSlot); 1816 Ops.push_back(DAG.getValueType(RetVT)); 1817 Ops.push_back(InFlag); 1818 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 1819 RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0); 1820 Chain = RetVal.getValue(1); 1821 } 1822 1823 if (RetVT == MVT::f32 && !X86ScalarSSE) 1824 // FIXME: we would really like to remember that this FP_ROUND 1825 // operation is okay to eliminate if we allow excess FP precision. 1826 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal); 1827 ResultVals.push_back(RetVal); 1828 NodeTys.push_back(RetVT); 1829 break; 1830 } 1831 } 1832 1833 1834 // If the function returns void, just return the chain. 1835 if (ResultVals.empty()) 1836 return Chain; 1837 1838 // Otherwise, merge everything together with a MERGE_VALUES node. 1839 NodeTys.push_back(MVT::Other); 1840 ResultVals.push_back(Chain); 1841 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 1842 &ResultVals[0], ResultVals.size()); 1843 return Res.getValue(Op.ResNo); 1844} 1845 1846//===----------------------------------------------------------------------===// 1847// StdCall Calling Convention implementation 1848//===----------------------------------------------------------------------===// 1849// StdCall calling convention seems to be standard for many Windows' API 1850// routines and around. It differs from C calling convention just a little: 1851// callee should clean up the stack, not caller. Symbols should be also 1852// decorated in some fancy way :) It doesn't support any vector arguments. 1853 1854/// HowToPassStdCallCCArgument - Returns how an formal argument of the specified 1855/// type should be passed. Returns the size of the stack slot 1856static void 1857HowToPassStdCallCCArgument(MVT::ValueType ObjectVT, unsigned &ObjSize) { 1858 switch (ObjectVT) { 1859 default: assert(0 && "Unhandled argument type!"); 1860 case MVT::i8: ObjSize = 1; break; 1861 case MVT::i16: ObjSize = 2; break; 1862 case MVT::i32: ObjSize = 4; break; 1863 case MVT::i64: ObjSize = 8; break; 1864 case MVT::f32: ObjSize = 4; break; 1865 case MVT::f64: ObjSize = 8; break; 1866 } 1867} 1868 1869SDOperand X86TargetLowering::LowerStdCallCCArguments(SDOperand Op, 1870 SelectionDAG &DAG) { 1871 unsigned NumArgs = Op.Val->getNumValues() - 1; 1872 MachineFunction &MF = DAG.getMachineFunction(); 1873 MachineFrameInfo *MFI = MF.getFrameInfo(); 1874 SDOperand Root = Op.getOperand(0); 1875 std::vector<SDOperand> ArgValues; 1876 1877 // Add DAG nodes to load the arguments... On entry to a function on the X86, 1878 // the stack frame looks like this: 1879 // 1880 // [ESP] -- return address 1881 // [ESP + 4] -- first argument (leftmost lexically) 1882 // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size 1883 // ... 1884 // 1885 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 1886 for (unsigned i = 0; i < NumArgs; ++i) { 1887 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 1888 unsigned ArgIncrement = 4; 1889 unsigned ObjSize = 0; 1890 HowToPassStdCallCCArgument(ObjectVT, ObjSize); 1891 if (ObjSize > 4) 1892 ArgIncrement = ObjSize; 1893 1894 SDOperand ArgValue; 1895 // Create the frame index object for this incoming parameter... 
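    // [Annotation, not part of the original source:] CreateFixedObject makes a
    // frame object at a known offset relative to the stack pointer on entry,
    // which is how incoming stack arguments are addressed; ArgOffset is the
    // byte offset past the return address, i.e. roughly [ESP + 4 + ArgOffset]
    // at function entry.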
1896 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 1897 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 1898 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 1899 ArgValues.push_back(ArgValue); 1900 ArgOffset += ArgIncrement; // Move on to the next argument... 1901 } 1902 1903 ArgValues.push_back(Root); 1904 1905 // If the function takes variable number of arguments, make a frame index for 1906 // the start of the first vararg value... for expansion of llvm.va_start. 1907 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1908 if (isVarArg) { 1909 BytesToPopOnReturn = 0; // Callee pops nothing. 1910 BytesCallerReserves = ArgOffset; 1911 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); 1912 } else { 1913 BytesToPopOnReturn = ArgOffset; // Callee pops everything.. 1914 BytesCallerReserves = 0; 1915 } 1916 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 1917 ReturnAddrIndex = 0; // No return address slot generated yet. 1918 1919 MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn); 1920 1921 // Return the new list of results. 1922 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(), 1923 Op.Val->value_end()); 1924 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size()); 1925} 1926 1927 1928SDOperand X86TargetLowering::LowerStdCallCCCallTo(SDOperand Op, 1929 SelectionDAG &DAG) { 1930 SDOperand Chain = Op.getOperand(0); 1931 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1932 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1933 SDOperand Callee = Op.getOperand(4); 1934 MVT::ValueType RetVT= Op.Val->getValueType(0); 1935 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1936 1937 // Count how many bytes are to be pushed on the stack. 1938 unsigned NumBytes = 0; 1939 for (unsigned i = 0; i != NumOps; ++i) { 1940 SDOperand Arg = Op.getOperand(5+2*i); 1941 1942 switch (Arg.getValueType()) { 1943 default: assert(0 && "Unexpected ValueType for argument!"); 1944 case MVT::i8: 1945 case MVT::i16: 1946 case MVT::i32: 1947 case MVT::f32: 1948 NumBytes += 4; 1949 break; 1950 case MVT::i64: 1951 case MVT::f64: 1952 NumBytes += 8; 1953 break; 1954 } 1955 } 1956 1957 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1958 1959 // Arguments go on the stack in reverse order, as specified by the ABI. 1960 unsigned ArgOffset = 0; 1961 std::vector<SDOperand> MemOpChains; 1962 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy()); 1963 for (unsigned i = 0; i != NumOps; ++i) { 1964 SDOperand Arg = Op.getOperand(5+2*i); 1965 1966 switch (Arg.getValueType()) { 1967 default: assert(0 && "Unexpected ValueType for argument!"); 1968 case MVT::i8: 1969 case MVT::i16: { 1970 // Promote the integer to 32 bits. If the input type is signed use a 1971 // sign extend, otherwise use a zero extend. 1972 unsigned ExtOp = 1973 dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ? 
1974 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1975 Arg = DAG.getNode(ExtOp, MVT::i32, Arg); 1976 } 1977 // Fallthrough 1978 1979 case MVT::i32: 1980 case MVT::f32: { 1981 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1982 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1983 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1984 ArgOffset += 4; 1985 break; 1986 } 1987 case MVT::i64: 1988 case MVT::f64: { 1989 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); 1990 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1991 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1992 ArgOffset += 8; 1993 break; 1994 } 1995 } 1996 } 1997 1998 if (!MemOpChains.empty()) 1999 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2000 &MemOpChains[0], MemOpChains.size()); 2001 2002 // If the callee is a GlobalAddress node (quite common, every direct call is) 2003 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 2004 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2005 // We should use extra load for direct calls to dllimported functions in 2006 // non-JIT mode. 2007 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 2008 getTargetMachine(), true)) 2009 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 2010 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2011 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 2012 2013 std::vector<MVT::ValueType> NodeTys; 2014 NodeTys.push_back(MVT::Other); // Returns a chain 2015 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2016 std::vector<SDOperand> Ops; 2017 Ops.push_back(Chain); 2018 Ops.push_back(Callee); 2019 2020 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, 2021 NodeTys, &Ops[0], Ops.size()); 2022 SDOperand InFlag = Chain.getValue(1); 2023 2024 // Create the CALLSEQ_END node. 2025 unsigned NumBytesForCalleeToPush; 2026 2027 if (isVarArg) { 2028 NumBytesForCalleeToPush = 0; 2029 } else { 2030 NumBytesForCalleeToPush = NumBytes; 2031 } 2032 2033 NodeTys.clear(); 2034 NodeTys.push_back(MVT::Other); // Returns a chain 2035 if (RetVT != MVT::Other) 2036 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
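  // [Annotation, not part of the original source:] the CALLSEQ_END node built
  // below takes the chain, the number of bytes the caller pushed for this call
  // (NumBytes), the number of bytes the callee pops on return
  // (NumBytesForCalleeToPush, zero in the vararg case above), and the glue
  // from the call; it later becomes the post-call stack adjustment.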
2037 Ops.clear(); 2038 Ops.push_back(Chain); 2039 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 2040 Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy())); 2041 Ops.push_back(InFlag); 2042 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 2043 if (RetVT != MVT::Other) 2044 InFlag = Chain.getValue(1); 2045 2046 std::vector<SDOperand> ResultVals; 2047 NodeTys.clear(); 2048 switch (RetVT) { 2049 default: assert(0 && "Unknown value type to return!"); 2050 case MVT::Other: break; 2051 case MVT::i8: 2052 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1); 2053 ResultVals.push_back(Chain.getValue(0)); 2054 NodeTys.push_back(MVT::i8); 2055 break; 2056 case MVT::i16: 2057 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1); 2058 ResultVals.push_back(Chain.getValue(0)); 2059 NodeTys.push_back(MVT::i16); 2060 break; 2061 case MVT::i32: 2062 if (Op.Val->getValueType(1) == MVT::i32) { 2063 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 2064 ResultVals.push_back(Chain.getValue(0)); 2065 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32, 2066 Chain.getValue(2)).getValue(1); 2067 ResultVals.push_back(Chain.getValue(0)); 2068 NodeTys.push_back(MVT::i32); 2069 } else { 2070 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1); 2071 ResultVals.push_back(Chain.getValue(0)); 2072 } 2073 NodeTys.push_back(MVT::i32); 2074 break; 2075 case MVT::f32: 2076 case MVT::f64: { 2077 std::vector<MVT::ValueType> Tys; 2078 Tys.push_back(MVT::f64); 2079 Tys.push_back(MVT::Other); 2080 Tys.push_back(MVT::Flag); 2081 std::vector<SDOperand> Ops; 2082 Ops.push_back(Chain); 2083 Ops.push_back(InFlag); 2084 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, 2085 &Ops[0], Ops.size()); 2086 Chain = RetVal.getValue(1); 2087 InFlag = RetVal.getValue(2); 2088 if (X86ScalarSSE) { 2089 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 2090 // shouldn't be necessary except that RFP cannot be live across 2091 // multiple blocks. When stackifier is fixed, they can be uncoupled. 2092 MachineFunction &MF = DAG.getMachineFunction(); 2093 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 2094 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 2095 Tys.clear(); 2096 Tys.push_back(MVT::Other); 2097 Ops.clear(); 2098 Ops.push_back(Chain); 2099 Ops.push_back(RetVal); 2100 Ops.push_back(StackSlot); 2101 Ops.push_back(DAG.getValueType(RetVT)); 2102 Ops.push_back(InFlag); 2103 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 2104 RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0); 2105 Chain = RetVal.getValue(1); 2106 } 2107 2108 if (RetVT == MVT::f32 && !X86ScalarSSE) 2109 // FIXME: we would really like to remember that this FP_ROUND 2110 // operation is okay to eliminate if we allow excess FP precision. 2111 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal); 2112 ResultVals.push_back(RetVal); 2113 NodeTys.push_back(RetVT); 2114 break; 2115 } 2116 } 2117 2118 // If the function returns void, just return the chain. 2119 if (ResultVals.empty()) 2120 return Chain; 2121 2122 // Otherwise, merge everything together with a MERGE_VALUES node. 
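  // [Annotation, not part of the original source:] the code below appends the
  // chain to ResultVals, merges everything into a single MERGE_VALUES node,
  // and returns Res.getValue(Op.ResNo) so the caller receives exactly the
  // result number it asked for while the copies-from-register stay threaded
  // on the chain.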
2123 NodeTys.push_back(MVT::Other); 2124 ResultVals.push_back(Chain); 2125 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 2126 &ResultVals[0], ResultVals.size()); 2127 return Res.getValue(Op.ResNo); 2128} 2129 2130//===----------------------------------------------------------------------===// 2131// FastCall Calling Convention implementation 2132//===----------------------------------------------------------------------===// 2133// 2134// The X86 'fastcall' calling convention passes up to two integer arguments in 2135// registers (an appropriate portion of ECX/EDX), passes arguments in C order, 2136// and requires that the callee pop its arguments off the stack (allowing proper 2137// tail calls), and has the same return value conventions as C calling convs. 2138// 2139// This calling convention always arranges for the callee pop value to be 8n+4 2140// bytes, which is needed for tail recursion elimination and stack alignment 2141// reasons. 2142// 2143 2144/// HowToPassFastCallCCArgument - Returns how an formal argument of the 2145/// specified type should be passed. If it is through stack, returns the size of 2146/// the stack slot; if it is through integer register, returns the number of 2147/// integer registers are needed. 2148static void 2149HowToPassFastCallCCArgument(MVT::ValueType ObjectVT, 2150 unsigned NumIntRegs, 2151 unsigned &ObjSize, 2152 unsigned &ObjIntRegs) 2153{ 2154 ObjSize = 0; 2155 ObjIntRegs = 0; 2156 2157 switch (ObjectVT) { 2158 default: assert(0 && "Unhandled argument type!"); 2159 case MVT::i8: 2160 if (NumIntRegs < 2) 2161 ObjIntRegs = 1; 2162 else 2163 ObjSize = 1; 2164 break; 2165 case MVT::i16: 2166 if (NumIntRegs < 2) 2167 ObjIntRegs = 1; 2168 else 2169 ObjSize = 2; 2170 break; 2171 case MVT::i32: 2172 if (NumIntRegs < 2) 2173 ObjIntRegs = 1; 2174 else 2175 ObjSize = 4; 2176 break; 2177 case MVT::i64: 2178 if (NumIntRegs+2 <= 2) { 2179 ObjIntRegs = 2; 2180 } else if (NumIntRegs+1 <= 2) { 2181 ObjIntRegs = 1; 2182 ObjSize = 4; 2183 } else 2184 ObjSize = 8; 2185 case MVT::f32: 2186 ObjSize = 4; 2187 break; 2188 case MVT::f64: 2189 ObjSize = 8; 2190 break; 2191 } 2192} 2193 2194SDOperand 2195X86TargetLowering::LowerFastCallCCArguments(SDOperand Op, SelectionDAG &DAG) { 2196 unsigned NumArgs = Op.Val->getNumValues()-1; 2197 MachineFunction &MF = DAG.getMachineFunction(); 2198 MachineFrameInfo *MFI = MF.getFrameInfo(); 2199 SDOperand Root = Op.getOperand(0); 2200 std::vector<SDOperand> ArgValues; 2201 2202 // Add DAG nodes to load the arguments... On entry to a function the stack 2203 // frame looks like this: 2204 // 2205 // [ESP] -- return address 2206 // [ESP + 4] -- first nonreg argument (leftmost lexically) 2207 // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size 2208 // ... 2209 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 2210 2211 // Keep track of the number of integer regs passed so far. This can be either 2212 // 0 (neither ECX or EDX used), 1 (ECX is used) or 2 (ECX and EDX are both 2213 // used). 
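  // [Annotation, not part of the original source,] a worked example of the
  // rules above: for a fastcall function taking (int a, int b, int c), a is
  // assigned ECX, b is assigned EDX, and c is loaded from the stack at
  // [ESP + 4] (fixed object at ArgOffset 0), so ArgOffset ends up 4 and the
  // callee pops 4 bytes.  With four int arguments the two stack slots give
  // ArgOffset == 8, which the 8n+4 adjustment below bumps to 12 before it is
  // recorded in BytesToPopOnReturn.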
2214 unsigned NumIntRegs = 0; 2215 2216 for (unsigned i = 0; i < NumArgs; ++i) { 2217 MVT::ValueType ObjectVT = Op.getValue(i).getValueType(); 2218 unsigned ArgIncrement = 4; 2219 unsigned ObjSize = 0; 2220 unsigned ObjIntRegs = 0; 2221 2222 HowToPassFastCallCCArgument(ObjectVT, NumIntRegs, ObjSize, ObjIntRegs); 2223 if (ObjSize > 4) 2224 ArgIncrement = ObjSize; 2225 2226 unsigned Reg = 0; 2227 SDOperand ArgValue; 2228 if (ObjIntRegs) { 2229 switch (ObjectVT) { 2230 default: assert(0 && "Unhandled argument type!"); 2231 case MVT::i8: 2232 Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::CL, 2233 X86::GR8RegisterClass); 2234 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8); 2235 break; 2236 case MVT::i16: 2237 Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::CX, 2238 X86::GR16RegisterClass); 2239 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16); 2240 break; 2241 case MVT::i32: 2242 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX, 2243 X86::GR32RegisterClass); 2244 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2245 break; 2246 case MVT::i64: 2247 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX, 2248 X86::GR32RegisterClass); 2249 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2250 if (ObjIntRegs == 2) { 2251 Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass); 2252 SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32); 2253 ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 2254 } 2255 break; 2256 } 2257 2258 NumIntRegs += ObjIntRegs; 2259 } 2260 2261 if (ObjSize) { 2262 // Create the SelectionDAG nodes corresponding to a load from this 2263 // parameter. 2264 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); 2265 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 2266 if (ObjectVT == MVT::i64 && ObjIntRegs) { 2267 SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, 2268 NULL, 0); 2269 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2); 2270 } else 2271 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0); 2272 ArgOffset += ArgIncrement; // Move on to the next argument. 2273 } 2274 2275 ArgValues.push_back(ArgValue); 2276 } 2277 2278 ArgValues.push_back(Root); 2279 2280 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 2281 // arguments and the arguments after the retaddr has been pushed are aligned. 2282 if ((ArgOffset & 7) == 0) 2283 ArgOffset += 4; 2284 2285 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. 2286 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 2287 ReturnAddrIndex = 0; // No return address slot generated yet. 2288 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments. 2289 BytesCallerReserves = 0; 2290 2291 MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn); 2292 2293 // Finally, inform the code generator which regs we return values in. 2294 switch (getValueType(MF.getFunction()->getReturnType())) { 2295 default: assert(0 && "Unknown type!"); 2296 case MVT::isVoid: break; 2297 case MVT::i1: 2298 case MVT::i8: 2299 case MVT::i16: 2300 case MVT::i32: 2301 MF.addLiveOut(X86::ECX); 2302 break; 2303 case MVT::i64: 2304 MF.addLiveOut(X86::ECX); 2305 MF.addLiveOut(X86::EDX); 2306 break; 2307 case MVT::f32: 2308 case MVT::f64: 2309 MF.addLiveOut(X86::ST0); 2310 break; 2311 } 2312 2313 // Return the new list of results. 
2314   std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
2315                                      Op.Val->value_end());
2316   return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
2317 }
2318 
2319 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
2320   if (ReturnAddrIndex == 0) {
2321     // Set up a frame object for the return address.
2322     MachineFunction &MF = DAG.getMachineFunction();
2323     if (Subtarget->is64Bit())
2324       ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
2325     else
2326       ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
2327   }
2328 
2329   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
2330 }
2331 
2332 
2333 
2334 std::pair<SDOperand, SDOperand> X86TargetLowering::
2335 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
2336                         SelectionDAG &DAG) {
2337   SDOperand Result;
2338   if (Depth)        // Depths > 0 not supported yet!
2339     Result = DAG.getConstant(0, getPointerTy());
2340   else {
2341     SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
2342     if (!isFrameAddress)
2343       // Just load the return address
2344       Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI,
2345                            NULL, 0);
2346     else
2347       Result = DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
2348                            DAG.getConstant(4, getPointerTy()));
2349   }
2350   return std::make_pair(Result, Chain);
2351 }
2352 
2353 /// translateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
2354 /// specific condition code. It returns false if it cannot do a direct
2355 /// translation. X86CC is the translated CondCode. LHS/RHS are modified as
2356 /// needed.
2357 static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
2358                            unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
2359                            SelectionDAG &DAG) {
2360   X86CC = X86::COND_INVALID;
2361   if (!isFP) {
2362     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2363       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
2364         // X > -1   -> X == 0, jump !sign.
2365         RHS = DAG.getConstant(0, RHS.getValueType());
2366         X86CC = X86::COND_NS;
2367         return true;
2368       } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
2369         // X < 0   -> X == 0, jump on sign.
2370 X86CC = X86::COND_S; 2371 return true; 2372 } 2373 } 2374 2375 switch (SetCCOpcode) { 2376 default: break; 2377 case ISD::SETEQ: X86CC = X86::COND_E; break; 2378 case ISD::SETGT: X86CC = X86::COND_G; break; 2379 case ISD::SETGE: X86CC = X86::COND_GE; break; 2380 case ISD::SETLT: X86CC = X86::COND_L; break; 2381 case ISD::SETLE: X86CC = X86::COND_LE; break; 2382 case ISD::SETNE: X86CC = X86::COND_NE; break; 2383 case ISD::SETULT: X86CC = X86::COND_B; break; 2384 case ISD::SETUGT: X86CC = X86::COND_A; break; 2385 case ISD::SETULE: X86CC = X86::COND_BE; break; 2386 case ISD::SETUGE: X86CC = X86::COND_AE; break; 2387 } 2388 } else { 2389 // On a floating point condition, the flags are set as follows: 2390 // ZF PF CF op 2391 // 0 | 0 | 0 | X > Y 2392 // 0 | 0 | 1 | X < Y 2393 // 1 | 0 | 0 | X == Y 2394 // 1 | 1 | 1 | unordered 2395 bool Flip = false; 2396 switch (SetCCOpcode) { 2397 default: break; 2398 case ISD::SETUEQ: 2399 case ISD::SETEQ: X86CC = X86::COND_E; break; 2400 case ISD::SETOLT: Flip = true; // Fallthrough 2401 case ISD::SETOGT: 2402 case ISD::SETGT: X86CC = X86::COND_A; break; 2403 case ISD::SETOLE: Flip = true; // Fallthrough 2404 case ISD::SETOGE: 2405 case ISD::SETGE: X86CC = X86::COND_AE; break; 2406 case ISD::SETUGT: Flip = true; // Fallthrough 2407 case ISD::SETULT: 2408 case ISD::SETLT: X86CC = X86::COND_B; break; 2409 case ISD::SETUGE: Flip = true; // Fallthrough 2410 case ISD::SETULE: 2411 case ISD::SETLE: X86CC = X86::COND_BE; break; 2412 case ISD::SETONE: 2413 case ISD::SETNE: X86CC = X86::COND_NE; break; 2414 case ISD::SETUO: X86CC = X86::COND_P; break; 2415 case ISD::SETO: X86CC = X86::COND_NP; break; 2416 } 2417 if (Flip) 2418 std::swap(LHS, RHS); 2419 } 2420 2421 return X86CC != X86::COND_INVALID; 2422} 2423 2424/// hasFPCMov - is there a floating point cmov for the specific X86 condition 2425/// code. Current x86 isa includes the following FP cmov instructions: 2426/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 2427static bool hasFPCMov(unsigned X86CC) { 2428 switch (X86CC) { 2429 default: 2430 return false; 2431 case X86::COND_B: 2432 case X86::COND_BE: 2433 case X86::COND_E: 2434 case X86::COND_P: 2435 case X86::COND_A: 2436 case X86::COND_AE: 2437 case X86::COND_NE: 2438 case X86::COND_NP: 2439 return true; 2440 } 2441} 2442 2443/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return 2444/// true if Op is undef or if its value falls within the specified range (L, H]. 2445static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { 2446 if (Op.getOpcode() == ISD::UNDEF) 2447 return true; 2448 2449 unsigned Val = cast<ConstantSDNode>(Op)->getValue(); 2450 return (Val >= Low && Val < Hi); 2451} 2452 2453/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return 2454/// true if Op is undef or if its value equal to the specified value. 2455static bool isUndefOrEqual(SDOperand Op, unsigned Val) { 2456 if (Op.getOpcode() == ISD::UNDEF) 2457 return true; 2458 return cast<ConstantSDNode>(Op)->getValue() == Val; 2459} 2460 2461/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 2462/// specifies a shuffle of elements that is suitable for input to PSHUFD. 2463bool X86::isPSHUFDMask(SDNode *N) { 2464 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2465 2466 if (N->getNumOperands() != 4) 2467 return false; 2468 2469 // Check if the value doesn't reference the second vector. 
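  // [Annotation, not part of the original source:] shuffle masks in this file
  // are BUILD_VECTORs of constants in which values 0..NumElems-1 select
  // elements of the first source vector and NumElems..2*NumElems-1 select
  // elements of the second.  Requiring every index below to be < 4 therefore
  // means this 4-element shuffle reads only its first source, which is what a
  // single PSHUFD can do.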
2470 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2471 SDOperand Arg = N->getOperand(i); 2472 if (Arg.getOpcode() == ISD::UNDEF) continue; 2473 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2474 if (cast<ConstantSDNode>(Arg)->getValue() >= 4) 2475 return false; 2476 } 2477 2478 return true; 2479} 2480 2481/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 2482/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 2483bool X86::isPSHUFHWMask(SDNode *N) { 2484 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2485 2486 if (N->getNumOperands() != 8) 2487 return false; 2488 2489 // Lower quadword copied in order. 2490 for (unsigned i = 0; i != 4; ++i) { 2491 SDOperand Arg = N->getOperand(i); 2492 if (Arg.getOpcode() == ISD::UNDEF) continue; 2493 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2494 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2495 return false; 2496 } 2497 2498 // Upper quadword shuffled. 2499 for (unsigned i = 4; i != 8; ++i) { 2500 SDOperand Arg = N->getOperand(i); 2501 if (Arg.getOpcode() == ISD::UNDEF) continue; 2502 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2503 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2504 if (Val < 4 || Val > 7) 2505 return false; 2506 } 2507 2508 return true; 2509} 2510 2511/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2512/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2513bool X86::isPSHUFLWMask(SDNode *N) { 2514 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2515 2516 if (N->getNumOperands() != 8) 2517 return false; 2518 2519 // Upper quadword copied in order. 2520 for (unsigned i = 4; i != 8; ++i) 2521 if (!isUndefOrEqual(N->getOperand(i), i)) 2522 return false; 2523 2524 // Lower quadword shuffled. 2525 for (unsigned i = 0; i != 4; ++i) 2526 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2527 return false; 2528 2529 return true; 2530} 2531 2532/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2533/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2534static bool isSHUFPMask(std::vector<SDOperand> &N) { 2535 unsigned NumElems = N.size(); 2536 if (NumElems != 2 && NumElems != 4) return false; 2537 2538 unsigned Half = NumElems / 2; 2539 for (unsigned i = 0; i < Half; ++i) 2540 if (!isUndefOrInRange(N[i], 0, NumElems)) 2541 return false; 2542 for (unsigned i = Half; i < NumElems; ++i) 2543 if (!isUndefOrInRange(N[i], NumElems, NumElems*2)) 2544 return false; 2545 2546 return true; 2547} 2548 2549bool X86::isSHUFPMask(SDNode *N) { 2550 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2551 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2552 return ::isSHUFPMask(Ops); 2553} 2554 2555/// isCommutedSHUFP - Returns true if the shuffle mask is except 2556/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2557/// half elements to come from vector 1 (which would equal the dest.) and 2558/// the upper half to come from vector 2. 
2559static bool isCommutedSHUFP(std::vector<SDOperand> &Ops) { 2560 unsigned NumElems = Ops.size(); 2561 if (NumElems != 2 && NumElems != 4) return false; 2562 2563 unsigned Half = NumElems / 2; 2564 for (unsigned i = 0; i < Half; ++i) 2565 if (!isUndefOrInRange(Ops[i], NumElems, NumElems*2)) 2566 return false; 2567 for (unsigned i = Half; i < NumElems; ++i) 2568 if (!isUndefOrInRange(Ops[i], 0, NumElems)) 2569 return false; 2570 return true; 2571} 2572 2573static bool isCommutedSHUFP(SDNode *N) { 2574 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2575 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2576 return isCommutedSHUFP(Ops); 2577} 2578 2579/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2580/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2581bool X86::isMOVHLPSMask(SDNode *N) { 2582 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2583 2584 if (N->getNumOperands() != 4) 2585 return false; 2586 2587 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2588 return isUndefOrEqual(N->getOperand(0), 6) && 2589 isUndefOrEqual(N->getOperand(1), 7) && 2590 isUndefOrEqual(N->getOperand(2), 2) && 2591 isUndefOrEqual(N->getOperand(3), 3); 2592} 2593 2594/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2595/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2596/// <2, 3, 2, 3> 2597bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2598 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2599 2600 if (N->getNumOperands() != 4) 2601 return false; 2602 2603 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2604 return isUndefOrEqual(N->getOperand(0), 2) && 2605 isUndefOrEqual(N->getOperand(1), 3) && 2606 isUndefOrEqual(N->getOperand(2), 2) && 2607 isUndefOrEqual(N->getOperand(3), 3); 2608} 2609 2610/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2611/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2612bool X86::isMOVLPMask(SDNode *N) { 2613 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2614 2615 unsigned NumElems = N->getNumOperands(); 2616 if (NumElems != 2 && NumElems != 4) 2617 return false; 2618 2619 for (unsigned i = 0; i < NumElems/2; ++i) 2620 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2621 return false; 2622 2623 for (unsigned i = NumElems/2; i < NumElems; ++i) 2624 if (!isUndefOrEqual(N->getOperand(i), i)) 2625 return false; 2626 2627 return true; 2628} 2629 2630/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2631/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2632/// and MOVLHPS. 2633bool X86::isMOVHPMask(SDNode *N) { 2634 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2635 2636 unsigned NumElems = N->getNumOperands(); 2637 if (NumElems != 2 && NumElems != 4) 2638 return false; 2639 2640 for (unsigned i = 0; i < NumElems/2; ++i) 2641 if (!isUndefOrEqual(N->getOperand(i), i)) 2642 return false; 2643 2644 for (unsigned i = 0; i < NumElems/2; ++i) { 2645 SDOperand Arg = N->getOperand(i + NumElems/2); 2646 if (!isUndefOrEqual(Arg, i + NumElems)) 2647 return false; 2648 } 2649 2650 return true; 2651} 2652 2653/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2654/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
2655bool static isUNPCKLMask(std::vector<SDOperand> &N, bool V2IsSplat = false) { 2656 unsigned NumElems = N.size(); 2657 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2658 return false; 2659 2660 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2661 SDOperand BitI = N[i]; 2662 SDOperand BitI1 = N[i+1]; 2663 if (!isUndefOrEqual(BitI, j)) 2664 return false; 2665 if (V2IsSplat) { 2666 if (isUndefOrEqual(BitI1, NumElems)) 2667 return false; 2668 } else { 2669 if (!isUndefOrEqual(BitI1, j + NumElems)) 2670 return false; 2671 } 2672 } 2673 2674 return true; 2675} 2676 2677bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2678 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2679 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2680 return ::isUNPCKLMask(Ops, V2IsSplat); 2681} 2682 2683/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2684/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2685bool static isUNPCKHMask(std::vector<SDOperand> &N, bool V2IsSplat = false) { 2686 unsigned NumElems = N.size(); 2687 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2688 return false; 2689 2690 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2691 SDOperand BitI = N[i]; 2692 SDOperand BitI1 = N[i+1]; 2693 if (!isUndefOrEqual(BitI, j + NumElems/2)) 2694 return false; 2695 if (V2IsSplat) { 2696 if (isUndefOrEqual(BitI1, NumElems)) 2697 return false; 2698 } else { 2699 if (!isUndefOrEqual(BitI1, j + NumElems/2 + NumElems)) 2700 return false; 2701 } 2702 } 2703 2704 return true; 2705} 2706 2707bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2708 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2709 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2710 return ::isUNPCKHMask(Ops, V2IsSplat); 2711} 2712 2713/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2714/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2715/// <0, 0, 1, 1> 2716bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2717 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2718 2719 unsigned NumElems = N->getNumOperands(); 2720 if (NumElems != 4 && NumElems != 8 && NumElems != 16) 2721 return false; 2722 2723 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2724 SDOperand BitI = N->getOperand(i); 2725 SDOperand BitI1 = N->getOperand(i+1); 2726 2727 if (!isUndefOrEqual(BitI, j)) 2728 return false; 2729 if (!isUndefOrEqual(BitI1, j)) 2730 return false; 2731 } 2732 2733 return true; 2734} 2735 2736/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2737/// specifies a shuffle of elements that is suitable for input to MOVSS, 2738/// MOVSD, and MOVD, i.e. setting the lowest element. 2739static bool isMOVLMask(std::vector<SDOperand> &N) { 2740 unsigned NumElems = N.size(); 2741 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2742 return false; 2743 2744 if (!isUndefOrEqual(N[0], NumElems)) 2745 return false; 2746 2747 for (unsigned i = 1; i < NumElems; ++i) { 2748 SDOperand Arg = N[i]; 2749 if (!isUndefOrEqual(Arg, i)) 2750 return false; 2751 } 2752 2753 return true; 2754} 2755 2756bool X86::isMOVLMask(SDNode *N) { 2757 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2758 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2759 return ::isMOVLMask(Ops); 2760} 2761 2762/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 2763/// of what x86 movss want. 
X86 movs requires the lowest element to be lowest 2764/// element of vector 2 and the other elements to come from vector 1 in order. 2765static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false, 2766 bool V2IsUndef = false) { 2767 unsigned NumElems = Ops.size(); 2768 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2769 return false; 2770 2771 if (!isUndefOrEqual(Ops[0], 0)) 2772 return false; 2773 2774 for (unsigned i = 1; i < NumElems; ++i) { 2775 SDOperand Arg = Ops[i]; 2776 if (!(isUndefOrEqual(Arg, i+NumElems) || 2777 (V2IsUndef && isUndefOrInRange(Arg, NumElems, NumElems*2)) || 2778 (V2IsSplat && isUndefOrEqual(Arg, NumElems)))) 2779 return false; 2780 } 2781 2782 return true; 2783} 2784 2785static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 2786 bool V2IsUndef = false) { 2787 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2788 std::vector<SDOperand> Ops(N->op_begin(), N->op_end()); 2789 return isCommutedMOVL(Ops, V2IsSplat, V2IsUndef); 2790} 2791 2792/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2793/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 2794bool X86::isMOVSHDUPMask(SDNode *N) { 2795 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2796 2797 if (N->getNumOperands() != 4) 2798 return false; 2799 2800 // Expect 1, 1, 3, 3 2801 for (unsigned i = 0; i < 2; ++i) { 2802 SDOperand Arg = N->getOperand(i); 2803 if (Arg.getOpcode() == ISD::UNDEF) continue; 2804 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2805 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2806 if (Val != 1) return false; 2807 } 2808 2809 bool HasHi = false; 2810 for (unsigned i = 2; i < 4; ++i) { 2811 SDOperand Arg = N->getOperand(i); 2812 if (Arg.getOpcode() == ISD::UNDEF) continue; 2813 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2814 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2815 if (Val != 3) return false; 2816 HasHi = true; 2817 } 2818 2819 // Don't use movshdup if it can be done with a shufps. 2820 return HasHi; 2821} 2822 2823/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2824/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 2825bool X86::isMOVSLDUPMask(SDNode *N) { 2826 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2827 2828 if (N->getNumOperands() != 4) 2829 return false; 2830 2831 // Expect 0, 0, 2, 2 2832 for (unsigned i = 0; i < 2; ++i) { 2833 SDOperand Arg = N->getOperand(i); 2834 if (Arg.getOpcode() == ISD::UNDEF) continue; 2835 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2836 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2837 if (Val != 0) return false; 2838 } 2839 2840 bool HasHi = false; 2841 for (unsigned i = 2; i < 4; ++i) { 2842 SDOperand Arg = N->getOperand(i); 2843 if (Arg.getOpcode() == ISD::UNDEF) continue; 2844 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2845 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2846 if (Val != 2) return false; 2847 HasHi = true; 2848 } 2849 2850 // Don't use movshdup if it can be done with a shufps. 2851 return HasHi; 2852} 2853 2854/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2855/// a splat of a single element. 2856static bool isSplatMask(SDNode *N) { 2857 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2858 2859 // This is a splat operation if each element of the permute is the same, and 2860 // if the value doesn't reference the second vector. 
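  // [Annotation, not part of the original source:] for example the mask
  // <2, 2, 2, 2> (possibly with some elements undef) splats element 2 of the
  // first source vector, whereas <5, 5, 5, 5> is rejected by the final range
  // check below because it draws from the second vector.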
2861 unsigned NumElems = N->getNumOperands(); 2862 SDOperand ElementBase; 2863 unsigned i = 0; 2864 for (; i != NumElems; ++i) { 2865 SDOperand Elt = N->getOperand(i); 2866 if (isa<ConstantSDNode>(Elt)) { 2867 ElementBase = Elt; 2868 break; 2869 } 2870 } 2871 2872 if (!ElementBase.Val) 2873 return false; 2874 2875 for (; i != NumElems; ++i) { 2876 SDOperand Arg = N->getOperand(i); 2877 if (Arg.getOpcode() == ISD::UNDEF) continue; 2878 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2879 if (Arg != ElementBase) return false; 2880 } 2881 2882 // Make sure it is a splat of the first vector operand. 2883 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 2884} 2885 2886/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2887/// a splat of a single element and it's a 2 or 4 element mask. 2888bool X86::isSplatMask(SDNode *N) { 2889 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2890 2891 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 2892 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 2893 return false; 2894 return ::isSplatMask(N); 2895} 2896 2897/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 2898/// specifies a splat of zero element. 2899bool X86::isSplatLoMask(SDNode *N) { 2900 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2901 2902 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2903 if (!isUndefOrEqual(N->getOperand(i), 0)) 2904 return false; 2905 return true; 2906} 2907 2908/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2909/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2910/// instructions. 2911unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2912 unsigned NumOperands = N->getNumOperands(); 2913 unsigned Shift = (NumOperands == 4) ? 2 : 1; 2914 unsigned Mask = 0; 2915 for (unsigned i = 0; i < NumOperands; ++i) { 2916 unsigned Val = 0; 2917 SDOperand Arg = N->getOperand(NumOperands-i-1); 2918 if (Arg.getOpcode() != ISD::UNDEF) 2919 Val = cast<ConstantSDNode>(Arg)->getValue(); 2920 if (Val >= NumOperands) Val -= NumOperands; 2921 Mask |= Val; 2922 if (i != NumOperands - 1) 2923 Mask <<= Shift; 2924 } 2925 2926 return Mask; 2927} 2928 2929/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2930/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2931/// instructions. 2932unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2933 unsigned Mask = 0; 2934 // 8 nodes, but we only care about the last 4. 2935 for (unsigned i = 7; i >= 4; --i) { 2936 unsigned Val = 0; 2937 SDOperand Arg = N->getOperand(i); 2938 if (Arg.getOpcode() != ISD::UNDEF) 2939 Val = cast<ConstantSDNode>(Arg)->getValue(); 2940 Mask |= (Val - 4); 2941 if (i != 4) 2942 Mask <<= 2; 2943 } 2944 2945 return Mask; 2946} 2947 2948/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2949/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2950/// instructions. 2951unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2952 unsigned Mask = 0; 2953 // 8 nodes, but we only care about the first 4. 
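  // [Annotation, not part of the original source:] the loop below packs two
  // bits per element, with mask element i landing in bits [2*i+1 : 2*i] of
  // the immediate.  For example the low-word mask <1, 0, 3, 2> (elements 4-7
  // left in place) encodes as (2<<6) | (3<<4) | (0<<2) | 1 = 0xB1.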
2954 for (int i = 3; i >= 0; --i) { 2955 unsigned Val = 0; 2956 SDOperand Arg = N->getOperand(i); 2957 if (Arg.getOpcode() != ISD::UNDEF) 2958 Val = cast<ConstantSDNode>(Arg)->getValue(); 2959 Mask |= Val; 2960 if (i != 0) 2961 Mask <<= 2; 2962 } 2963 2964 return Mask; 2965} 2966 2967/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2968/// specifies a 8 element shuffle that can be broken into a pair of 2969/// PSHUFHW and PSHUFLW. 2970static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2971 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2972 2973 if (N->getNumOperands() != 8) 2974 return false; 2975 2976 // Lower quadword shuffled. 2977 for (unsigned i = 0; i != 4; ++i) { 2978 SDOperand Arg = N->getOperand(i); 2979 if (Arg.getOpcode() == ISD::UNDEF) continue; 2980 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2981 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2982 if (Val > 4) 2983 return false; 2984 } 2985 2986 // Upper quadword shuffled. 2987 for (unsigned i = 4; i != 8; ++i) { 2988 SDOperand Arg = N->getOperand(i); 2989 if (Arg.getOpcode() == ISD::UNDEF) continue; 2990 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2991 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2992 if (Val < 4 || Val > 7) 2993 return false; 2994 } 2995 2996 return true; 2997} 2998 2999/// CommuteVectorShuffle - Swap vector_shuffle operandsas well as 3000/// values in ther permute mask. 3001static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 3002 SDOperand &V2, SDOperand &Mask, 3003 SelectionDAG &DAG) { 3004 MVT::ValueType VT = Op.getValueType(); 3005 MVT::ValueType MaskVT = Mask.getValueType(); 3006 MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT); 3007 unsigned NumElems = Mask.getNumOperands(); 3008 std::vector<SDOperand> MaskVec; 3009 3010 for (unsigned i = 0; i != NumElems; ++i) { 3011 SDOperand Arg = Mask.getOperand(i); 3012 if (Arg.getOpcode() == ISD::UNDEF) { 3013 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 3014 continue; 3015 } 3016 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 3017 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 3018 if (Val < NumElems) 3019 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 3020 else 3021 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 3022 } 3023 3024 std::swap(V1, V2); 3025 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3026 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3027} 3028 3029/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 3030/// match movhlps. The lower half elements should come from upper half of 3031/// V1 (and in order), and the upper half elements should come from the upper 3032/// half of V2 (and in order). 3033static bool ShouldXformToMOVHLPS(SDNode *Mask) { 3034 unsigned NumElems = Mask->getNumOperands(); 3035 if (NumElems != 4) 3036 return false; 3037 for (unsigned i = 0, e = 2; i != e; ++i) 3038 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 3039 return false; 3040 for (unsigned i = 2; i != 4; ++i) 3041 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 3042 return false; 3043 return true; 3044} 3045 3046/// isScalarLoadToVector - Returns true if the node is a scalar load that 3047/// is promoted to a vector. 
3048static inline bool isScalarLoadToVector(SDNode *N) { 3049 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 3050 N = N->getOperand(0).Val; 3051 return ISD::isNON_EXTLoad(N); 3052 } 3053 return false; 3054} 3055 3056/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 3057/// match movlp{s|d}. The lower half elements should come from lower half of 3058/// V1 (and in order), and the upper half elements should come from the upper 3059/// half of V2 (and in order). And since V1 will become the source of the 3060/// MOVLP, it must be either a vector load or a scalar load to vector. 3061static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 3062 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 3063 return false; 3064 // Is V2 is a vector load, don't do this transformation. We will try to use 3065 // load folding shufps op. 3066 if (ISD::isNON_EXTLoad(V2)) 3067 return false; 3068 3069 unsigned NumElems = Mask->getNumOperands(); 3070 if (NumElems != 2 && NumElems != 4) 3071 return false; 3072 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3073 if (!isUndefOrEqual(Mask->getOperand(i), i)) 3074 return false; 3075 for (unsigned i = NumElems/2; i != NumElems; ++i) 3076 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 3077 return false; 3078 return true; 3079} 3080 3081/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 3082/// all the same. 3083static bool isSplatVector(SDNode *N) { 3084 if (N->getOpcode() != ISD::BUILD_VECTOR) 3085 return false; 3086 3087 SDOperand SplatValue = N->getOperand(0); 3088 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 3089 if (N->getOperand(i) != SplatValue) 3090 return false; 3091 return true; 3092} 3093 3094/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 3095/// to an undef. 3096static bool isUndefShuffle(SDNode *N) { 3097 if (N->getOpcode() != ISD::BUILD_VECTOR) 3098 return false; 3099 3100 SDOperand V1 = N->getOperand(0); 3101 SDOperand V2 = N->getOperand(1); 3102 SDOperand Mask = N->getOperand(2); 3103 unsigned NumElems = Mask.getNumOperands(); 3104 for (unsigned i = 0; i != NumElems; ++i) { 3105 SDOperand Arg = Mask.getOperand(i); 3106 if (Arg.getOpcode() != ISD::UNDEF) { 3107 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 3108 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 3109 return false; 3110 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 3111 return false; 3112 } 3113 } 3114 return true; 3115} 3116 3117/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 3118/// that point to V2 points to its first element. 3119static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 3120 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 3121 3122 bool Changed = false; 3123 std::vector<SDOperand> MaskVec; 3124 unsigned NumElems = Mask.getNumOperands(); 3125 for (unsigned i = 0; i != NumElems; ++i) { 3126 SDOperand Arg = Mask.getOperand(i); 3127 if (Arg.getOpcode() != ISD::UNDEF) { 3128 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 3129 if (Val > NumElems) { 3130 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 3131 Changed = true; 3132 } 3133 } 3134 MaskVec.push_back(Arg); 3135 } 3136 3137 if (Changed) 3138 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 3139 &MaskVec[0], MaskVec.size()); 3140 return Mask; 3141} 3142 3143/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 3144/// operation of specified width. 
3145static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 3146 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3147 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3148 3149 std::vector<SDOperand> MaskVec; 3150 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 3151 for (unsigned i = 1; i != NumElems; ++i) 3152 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3153 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3154} 3155 3156/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 3157/// of specified width. 3158static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 3159 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3160 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3161 std::vector<SDOperand> MaskVec; 3162 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 3163 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3164 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 3165 } 3166 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3167} 3168 3169/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 3170/// of specified width. 3171static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 3172 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3173 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3174 unsigned Half = NumElems/2; 3175 std::vector<SDOperand> MaskVec; 3176 for (unsigned i = 0; i != Half; ++i) { 3177 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 3178 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 3179 } 3180 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3181} 3182 3183/// getZeroVector - Returns a vector of specified type with all zero elements. 3184/// 3185static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 3186 assert(MVT::isVector(VT) && "Expected a vector type"); 3187 unsigned NumElems = getVectorNumElements(VT); 3188 MVT::ValueType EVT = MVT::getVectorBaseType(VT); 3189 bool isFP = MVT::isFloatingPoint(EVT); 3190 SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT); 3191 std::vector<SDOperand> ZeroVec(NumElems, Zero); 3192 return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size()); 3193} 3194 3195/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 3196/// 3197static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 3198 SDOperand V1 = Op.getOperand(0); 3199 SDOperand Mask = Op.getOperand(2); 3200 MVT::ValueType VT = Op.getValueType(); 3201 unsigned NumElems = Mask.getNumOperands(); 3202 Mask = getUnpacklMask(NumElems, DAG); 3203 while (NumElems != 4) { 3204 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 3205 NumElems >>= 1; 3206 } 3207 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 3208 3209 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3210 Mask = getZeroVector(MaskVT, DAG); 3211 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 3212 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 3213 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 3214} 3215 3216/// isZeroNode - Returns true if Elt is a constant zero or a floating point 3217/// constant +0.0. 
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
  SDOperand Zero = DAG.getConstant(0, EVT);
  std::vector<SDOperand> MaskVec(NumElems, Zero);
  MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getConstant(i/2, TLI.getPointerTy()));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getConstant(i, TLI.getPointerTy()));
    }
  }

  return V;
}

SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor.
  if (ISD::isBuildVectorAllZeros(Op.Val))
    return Op;

  // All ones are handled with pcmpeqd.
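  // (Both of these build_vectors become a single instruction with no
  // constant pool load: pxor of a register with itself gives all zeros and
  // pcmpeqd of a register with itself gives all ones, so returning the node
  // unmodified lets instruction selection match those patterns directly.)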
3320 if (ISD::isBuildVectorAllOnes(Op.Val)) 3321 return Op; 3322 3323 MVT::ValueType VT = Op.getValueType(); 3324 MVT::ValueType EVT = MVT::getVectorBaseType(VT); 3325 unsigned EVTBits = MVT::getSizeInBits(EVT); 3326 3327 unsigned NumElems = Op.getNumOperands(); 3328 unsigned NumZero = 0; 3329 unsigned NumNonZero = 0; 3330 unsigned NonZeros = 0; 3331 std::set<SDOperand> Values; 3332 for (unsigned i = 0; i < NumElems; ++i) { 3333 SDOperand Elt = Op.getOperand(i); 3334 if (Elt.getOpcode() != ISD::UNDEF) { 3335 Values.insert(Elt); 3336 if (isZeroNode(Elt)) 3337 NumZero++; 3338 else { 3339 NonZeros |= (1 << i); 3340 NumNonZero++; 3341 } 3342 } 3343 } 3344 3345 if (NumNonZero == 0) 3346 // Must be a mix of zero and undef. Return a zero vector. 3347 return getZeroVector(VT, DAG); 3348 3349 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3350 if (Values.size() == 1) 3351 return SDOperand(); 3352 3353 // Special case for single non-zero element. 3354 if (NumNonZero == 1) { 3355 unsigned Idx = CountTrailingZeros_32(NonZeros); 3356 SDOperand Item = Op.getOperand(Idx); 3357 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3358 if (Idx == 0) 3359 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 3360 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 3361 NumZero > 0, DAG); 3362 3363 if (EVTBits == 32) { 3364 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3365 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 3366 DAG); 3367 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3368 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT); 3369 std::vector<SDOperand> MaskVec; 3370 for (unsigned i = 0; i < NumElems; i++) 3371 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3372 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3373 &MaskVec[0], MaskVec.size()); 3374 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3375 DAG.getNode(ISD::UNDEF, VT), Mask); 3376 } 3377 } 3378 3379 // Let legalizer expand 2-wide build_vector's. 3380 if (EVTBits == 64) 3381 return SDOperand(); 3382 3383 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3384 if (EVTBits == 8) { 3385 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3386 *this); 3387 if (V.Val) return V; 3388 } 3389 3390 if (EVTBits == 16) { 3391 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3392 *this); 3393 if (V.Val) return V; 3394 } 3395 3396 // If element VT is == 32 bits, turn it into a number of shuffles. 3397 std::vector<SDOperand> V(NumElems); 3398 if (NumElems == 4 && NumZero > 0) { 3399 for (unsigned i = 0; i < 4; ++i) { 3400 bool isZero = !(NonZeros & (1 << i)); 3401 if (isZero) 3402 V[i] = getZeroVector(VT, DAG); 3403 else 3404 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3405 } 3406 3407 for (unsigned i = 0; i < 2; ++i) { 3408 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3409 default: break; 3410 case 0: 3411 V[i] = V[i*2]; // Must be a zero vector. 3412 break; 3413 case 1: 3414 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3415 getMOVLMask(NumElems, DAG)); 3416 break; 3417 case 2: 3418 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3419 getMOVLMask(NumElems, DAG)); 3420 break; 3421 case 3: 3422 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3423 getUnpacklMask(NumElems, DAG)); 3424 break; 3425 } 3426 } 3427 3428 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. 
movd) 3429 // clears the upper bits. 3430 // FIXME: we can do the same for v4f32 case when we know both parts of 3431 // the lower half come from scalar_to_vector (loadf32). We should do 3432 // that in post legalizer dag combiner with target specific hooks. 3433 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3434 return V[0]; 3435 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3436 MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT); 3437 std::vector<SDOperand> MaskVec; 3438 bool Reverse = (NonZeros & 0x3) == 2; 3439 for (unsigned i = 0; i < 2; ++i) 3440 if (Reverse) 3441 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3442 else 3443 MaskVec.push_back(DAG.getConstant(i, EVT)); 3444 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3445 for (unsigned i = 0; i < 2; ++i) 3446 if (Reverse) 3447 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3448 else 3449 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3450 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3451 &MaskVec[0], MaskVec.size()); 3452 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3453 } 3454 3455 if (Values.size() > 2) { 3456 // Expand into a number of unpckl*. 3457 // e.g. for v4f32 3458 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3459 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3460 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3461 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3462 for (unsigned i = 0; i < NumElems; ++i) 3463 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3464 NumElems >>= 1; 3465 while (NumElems != 0) { 3466 for (unsigned i = 0; i < NumElems; ++i) 3467 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3468 UnpckMask); 3469 NumElems >>= 1; 3470 } 3471 return V[0]; 3472 } 3473 3474 return SDOperand(); 3475} 3476 3477SDOperand 3478X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3479 SDOperand V1 = Op.getOperand(0); 3480 SDOperand V2 = Op.getOperand(1); 3481 SDOperand PermMask = Op.getOperand(2); 3482 MVT::ValueType VT = Op.getValueType(); 3483 unsigned NumElems = PermMask.getNumOperands(); 3484 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3485 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3486 bool V1IsSplat = false; 3487 bool V2IsSplat = false; 3488 3489 if (isUndefShuffle(Op.Val)) 3490 return DAG.getNode(ISD::UNDEF, VT); 3491 3492 if (isSplatMask(PermMask.Val)) { 3493 if (NumElems <= 4) return Op; 3494 // Promote it to a v4i32 splat. 3495 return PromoteSplat(Op, DAG); 3496 } 3497 3498 if (X86::isMOVLMask(PermMask.Val)) 3499 return (V1IsUndef) ? 
V2 : Op; 3500 3501 if (X86::isMOVSHDUPMask(PermMask.Val) || 3502 X86::isMOVSLDUPMask(PermMask.Val) || 3503 X86::isMOVHLPSMask(PermMask.Val) || 3504 X86::isMOVHPMask(PermMask.Val) || 3505 X86::isMOVLPMask(PermMask.Val)) 3506 return Op; 3507 3508 if (ShouldXformToMOVHLPS(PermMask.Val) || 3509 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3510 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3511 3512 bool Commuted = false; 3513 V1IsSplat = isSplatVector(V1.Val); 3514 V2IsSplat = isSplatVector(V2.Val); 3515 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3516 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3517 std::swap(V1IsSplat, V2IsSplat); 3518 std::swap(V1IsUndef, V2IsUndef); 3519 Commuted = true; 3520 } 3521 3522 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3523 if (V2IsUndef) return V1; 3524 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3525 if (V2IsSplat) { 3526 // V2 is a splat, so the mask may be malformed. That is, it may point 3527 // to any V2 element. The instruction selectior won't like this. Get 3528 // a corrected mask and commute to form a proper MOVS{S|D}. 3529 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3530 if (NewMask.Val != PermMask.Val) 3531 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3532 } 3533 return Op; 3534 } 3535 3536 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3537 X86::isUNPCKLMask(PermMask.Val) || 3538 X86::isUNPCKHMask(PermMask.Val)) 3539 return Op; 3540 3541 if (V2IsSplat) { 3542 // Normalize mask so all entries that point to V2 points to its first 3543 // element then try to match unpck{h|l} again. If match, return a 3544 // new vector_shuffle with the corrected mask. 3545 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3546 if (NewMask.Val != PermMask.Val) { 3547 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3548 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3549 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3550 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3551 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3552 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3553 } 3554 } 3555 } 3556 3557 // Normalize the node to match x86 shuffle ops if needed 3558 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3559 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3560 3561 if (Commuted) { 3562 // Commute is back and try unpck* again. 3563 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3564 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3565 X86::isUNPCKLMask(PermMask.Val) || 3566 X86::isUNPCKHMask(PermMask.Val)) 3567 return Op; 3568 } 3569 3570 // If VT is integer, try PSHUF* first, then SHUFP*. 3571 if (MVT::isInteger(VT)) { 3572 if (X86::isPSHUFDMask(PermMask.Val) || 3573 X86::isPSHUFHWMask(PermMask.Val) || 3574 X86::isPSHUFLWMask(PermMask.Val)) { 3575 if (V2.getOpcode() != ISD::UNDEF) 3576 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3577 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3578 return Op; 3579 } 3580 3581 if (X86::isSHUFPMask(PermMask.Val)) 3582 return Op; 3583 3584 // Handle v8i16 shuffle high / low shuffle node pair. 
3585 if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) { 3586 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3587 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3588 std::vector<SDOperand> MaskVec; 3589 for (unsigned i = 0; i != 4; ++i) 3590 MaskVec.push_back(PermMask.getOperand(i)); 3591 for (unsigned i = 4; i != 8; ++i) 3592 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3593 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3594 &MaskVec[0], MaskVec.size()); 3595 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3596 MaskVec.clear(); 3597 for (unsigned i = 0; i != 4; ++i) 3598 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3599 for (unsigned i = 4; i != 8; ++i) 3600 MaskVec.push_back(PermMask.getOperand(i)); 3601 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size()); 3602 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3603 } 3604 } else { 3605 // Floating point cases in the other order. 3606 if (X86::isSHUFPMask(PermMask.Val)) 3607 return Op; 3608 if (X86::isPSHUFDMask(PermMask.Val) || 3609 X86::isPSHUFHWMask(PermMask.Val) || 3610 X86::isPSHUFLWMask(PermMask.Val)) { 3611 if (V2.getOpcode() != ISD::UNDEF) 3612 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3613 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3614 return Op; 3615 } 3616 } 3617 3618 if (NumElems == 4) { 3619 MVT::ValueType MaskVT = PermMask.getValueType(); 3620 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT); 3621 std::vector<std::pair<int, int> > Locs; 3622 Locs.reserve(NumElems); 3623 std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3624 std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3625 unsigned NumHi = 0; 3626 unsigned NumLo = 0; 3627 // If no more than two elements come from either vector. This can be 3628 // implemented with two shuffles. First shuffle gather the elements. 3629 // The second shuffle, which takes the first shuffle as both of its 3630 // vector operands, put the elements into the right order. 3631 for (unsigned i = 0; i != NumElems; ++i) { 3632 SDOperand Elt = PermMask.getOperand(i); 3633 if (Elt.getOpcode() == ISD::UNDEF) { 3634 Locs[i] = std::make_pair(-1, -1); 3635 } else { 3636 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3637 if (Val < NumElems) { 3638 Locs[i] = std::make_pair(0, NumLo); 3639 Mask1[NumLo] = Elt; 3640 NumLo++; 3641 } else { 3642 Locs[i] = std::make_pair(1, NumHi); 3643 if (2+NumHi < NumElems) 3644 Mask1[2+NumHi] = Elt; 3645 NumHi++; 3646 } 3647 } 3648 } 3649 if (NumLo <= 2 && NumHi <= 2) { 3650 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3651 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3652 &Mask1[0], Mask1.size())); 3653 for (unsigned i = 0; i != NumElems; ++i) { 3654 if (Locs[i].first == -1) 3655 continue; 3656 else { 3657 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3658 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3659 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3660 } 3661 } 3662 3663 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3664 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3665 &Mask2[0], Mask2.size())); 3666 } 3667 3668 // Break it into (shuffle shuffle_hi, shuffle_lo). 
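    // e.g. for the mask <0,1,4,2> (three elements come from V1, so the
    // two-shuffle attempt above gives up):
    //   shuffle_lo  mask <0,1,u,u>  ==> <V1[0], V1[1], ?, ?>
    //   shuffle_hi  mask <2,u,4,u>  ==> <V1[2], ?, V2[0], ?>
    //   final       mask <0,1,6,4>  applied to (shuffle_lo, shuffle_hi).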
3669 Locs.clear(); 3670 std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3671 std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3672 std::vector<SDOperand> *MaskPtr = &LoMask; 3673 unsigned MaskIdx = 0; 3674 unsigned LoIdx = 0; 3675 unsigned HiIdx = NumElems/2; 3676 for (unsigned i = 0; i != NumElems; ++i) { 3677 if (i == NumElems/2) { 3678 MaskPtr = &HiMask; 3679 MaskIdx = 1; 3680 LoIdx = 0; 3681 HiIdx = NumElems/2; 3682 } 3683 SDOperand Elt = PermMask.getOperand(i); 3684 if (Elt.getOpcode() == ISD::UNDEF) { 3685 Locs[i] = std::make_pair(-1, -1); 3686 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3687 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3688 (*MaskPtr)[LoIdx] = Elt; 3689 LoIdx++; 3690 } else { 3691 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3692 (*MaskPtr)[HiIdx] = Elt; 3693 HiIdx++; 3694 } 3695 } 3696 3697 SDOperand LoShuffle = 3698 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3699 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3700 &LoMask[0], LoMask.size())); 3701 SDOperand HiShuffle = 3702 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3703 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3704 &HiMask[0], HiMask.size())); 3705 std::vector<SDOperand> MaskOps; 3706 for (unsigned i = 0; i != NumElems; ++i) { 3707 if (Locs[i].first == -1) { 3708 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3709 } else { 3710 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3711 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3712 } 3713 } 3714 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3715 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3716 &MaskOps[0], MaskOps.size())); 3717 } 3718 3719 return SDOperand(); 3720} 3721 3722SDOperand 3723X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3724 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3725 return SDOperand(); 3726 3727 MVT::ValueType VT = Op.getValueType(); 3728 // TODO: handle v16i8. 3729 if (MVT::getSizeInBits(VT) == 16) { 3730 // Transform it so it match pextrw which produces a 32-bit result. 3731 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3732 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3733 Op.getOperand(0), Op.getOperand(1)); 3734 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3735 DAG.getValueType(VT)); 3736 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3737 } else if (MVT::getSizeInBits(VT) == 32) { 3738 SDOperand Vec = Op.getOperand(0); 3739 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3740 if (Idx == 0) 3741 return Op; 3742 // SHUFPS the element to the lowest double word, then movss. 
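    // For example, extracting element 2 of a v4f32 builds the mask
    // <2, u, u, u>, so the shufps moves the wanted element into lane 0 and
    // the EXTRACT_VECTOR_ELT of element 0 below is then trivial (the value
    // is already in the low element of the register).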
3743 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3744 std::vector<SDOperand> IdxVec; 3745 IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT))); 3746 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3747 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3748 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3749 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3750 &IdxVec[0], IdxVec.size()); 3751 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3752 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3753 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3754 DAG.getConstant(0, getPointerTy())); 3755 } else if (MVT::getSizeInBits(VT) == 64) { 3756 SDOperand Vec = Op.getOperand(0); 3757 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3758 if (Idx == 0) 3759 return Op; 3760 3761 // UNPCKHPD the element to the lowest double word, then movsd. 3762 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3763 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3764 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3765 std::vector<SDOperand> IdxVec; 3766 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT))); 3767 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3768 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3769 &IdxVec[0], IdxVec.size()); 3770 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3771 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3772 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3773 DAG.getConstant(0, getPointerTy())); 3774 } 3775 3776 return SDOperand(); 3777} 3778 3779SDOperand 3780X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3781 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 3782 // as its second argument. 3783 MVT::ValueType VT = Op.getValueType(); 3784 MVT::ValueType BaseVT = MVT::getVectorBaseType(VT); 3785 SDOperand N0 = Op.getOperand(0); 3786 SDOperand N1 = Op.getOperand(1); 3787 SDOperand N2 = Op.getOperand(2); 3788 if (MVT::getSizeInBits(BaseVT) == 16) { 3789 if (N1.getValueType() != MVT::i32) 3790 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3791 if (N2.getValueType() != MVT::i32) 3792 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32); 3793 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3794 } else if (MVT::getSizeInBits(BaseVT) == 32) { 3795 unsigned Idx = cast<ConstantSDNode>(N2)->getValue(); 3796 if (Idx == 0) { 3797 // Use a movss. 3798 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1); 3799 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3800 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3801 std::vector<SDOperand> MaskVec; 3802 MaskVec.push_back(DAG.getConstant(4, BaseVT)); 3803 for (unsigned i = 1; i <= 3; ++i) 3804 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3805 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1, 3806 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3807 &MaskVec[0], MaskVec.size())); 3808 } else { 3809 // Use two pinsrw instructions to insert a 32 bit value. 3810 Idx <<= 1; 3811 if (MVT::isFloatingPoint(N1.getValueType())) { 3812 if (ISD::isNON_EXTLoad(N1.Val)) { 3813 // Just load directly from f32mem to GR32. 
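        // (i.e. re-issue the f32 load with an i32 result type so the value
        // lands in a GR32 directly, instead of loading it into an XMM
        // register and extracting the bits via bit_convert + extract.)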
3814 LoadSDNode *LD = cast<LoadSDNode>(N1); 3815 N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(), 3816 LD->getSrcValue(), LD->getSrcValueOffset()); 3817 } else { 3818 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1); 3819 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1); 3820 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1, 3821 DAG.getConstant(0, getPointerTy())); 3822 } 3823 } 3824 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0); 3825 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3826 DAG.getConstant(Idx, getPointerTy())); 3827 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8)); 3828 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3829 DAG.getConstant(Idx+1, getPointerTy())); 3830 return DAG.getNode(ISD::BIT_CONVERT, VT, N0); 3831 } 3832 } 3833 3834 return SDOperand(); 3835} 3836 3837SDOperand 3838X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3839 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3840 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3841} 3842 3843// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3844// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 3845// one of the above mentioned nodes. It has to be wrapped because otherwise 3846// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3847// be used to form addressing mode. These wrapped nodes will be selected 3848// into MOV32ri. 3849SDOperand 3850X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3851 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3852 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3853 getPointerTy(), 3854 CP->getAlignment()); 3855 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3856 if (Subtarget->isTargetDarwin()) { 3857 // With PIC, the address is actually $g + Offset. 3858 if (!Subtarget->is64Bit() && 3859 getTargetMachine().getRelocationModel() == Reloc::PIC_) 3860 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3861 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result); 3862 } 3863 3864 return Result; 3865} 3866 3867SDOperand 3868X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3869 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3870 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3871 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3872 if (Subtarget->isTargetDarwin()) { 3873 // With PIC, the address is actually $g + Offset. 3874 if (!Subtarget->is64Bit() && 3875 getTargetMachine().getRelocationModel() == Reloc::PIC_) 3876 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3877 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3878 Result); 3879 } 3880 3881 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3882 // load the value at address GV, not the value of GV itself. This means that 3883 // the GlobalAddress must be in the base or index register of the address, not 3884 // the GV offset field. 
Platform check is inside GVRequiresExtraLoad() call 3885 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3886 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 3887 3888 return Result; 3889} 3890 3891SDOperand 3892X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3893 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3894 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3895 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3896 if (Subtarget->isTargetDarwin()) { 3897 // With PIC, the address is actually $g + Offset. 3898 if (!Subtarget->is64Bit() && 3899 getTargetMachine().getRelocationModel() == Reloc::PIC_) 3900 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3901 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3902 Result); 3903 } 3904 3905 return Result; 3906} 3907 3908SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3909 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3910 "Not an i64 shift!"); 3911 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3912 SDOperand ShOpLo = Op.getOperand(0); 3913 SDOperand ShOpHi = Op.getOperand(1); 3914 SDOperand ShAmt = Op.getOperand(2); 3915 SDOperand Tmp1 = isSRA ? 3916 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3917 DAG.getConstant(0, MVT::i32); 3918 3919 SDOperand Tmp2, Tmp3; 3920 if (Op.getOpcode() == ISD::SHL_PARTS) { 3921 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3922 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3923 } else { 3924 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3925 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3926 } 3927 3928 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3929 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3930 DAG.getConstant(32, MVT::i8)); 3931 SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)}; 3932 SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1); 3933 3934 SDOperand Hi, Lo; 3935 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3936 3937 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3938 SmallVector<SDOperand, 4> Ops; 3939 if (Op.getOpcode() == ISD::SHL_PARTS) { 3940 Ops.push_back(Tmp2); 3941 Ops.push_back(Tmp3); 3942 Ops.push_back(CC); 3943 Ops.push_back(InFlag); 3944 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3945 InFlag = Hi.getValue(1); 3946 3947 Ops.clear(); 3948 Ops.push_back(Tmp3); 3949 Ops.push_back(Tmp1); 3950 Ops.push_back(CC); 3951 Ops.push_back(InFlag); 3952 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3953 } else { 3954 Ops.push_back(Tmp2); 3955 Ops.push_back(Tmp3); 3956 Ops.push_back(CC); 3957 Ops.push_back(InFlag); 3958 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3959 InFlag = Lo.getValue(1); 3960 3961 Ops.clear(); 3962 Ops.push_back(Tmp3); 3963 Ops.push_back(Tmp1); 3964 Ops.push_back(CC); 3965 Ops.push_back(InFlag); 3966 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3967 } 3968 3969 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3970 Ops.clear(); 3971 Ops.push_back(Lo); 3972 Ops.push_back(Hi); 3973 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3974} 3975 3976SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3977 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3978 Op.getOperand(0).getValueType() >= 
MVT::i16 && 3979 "Unknown SINT_TO_FP to lower!"); 3980 3981 SDOperand Result; 3982 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3983 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3984 MachineFunction &MF = DAG.getMachineFunction(); 3985 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3986 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3987 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3988 StackSlot, NULL, 0); 3989 3990 // Build the FILD 3991 std::vector<MVT::ValueType> Tys; 3992 Tys.push_back(MVT::f64); 3993 Tys.push_back(MVT::Other); 3994 if (X86ScalarSSE) Tys.push_back(MVT::Flag); 3995 std::vector<SDOperand> Ops; 3996 Ops.push_back(Chain); 3997 Ops.push_back(StackSlot); 3998 Ops.push_back(DAG.getValueType(SrcVT)); 3999 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 4000 Tys, &Ops[0], Ops.size()); 4001 4002 if (X86ScalarSSE) { 4003 Chain = Result.getValue(1); 4004 SDOperand InFlag = Result.getValue(2); 4005 4006 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4007 // shouldn't be necessary except that RFP cannot be live across 4008 // multiple blocks. When stackifier is fixed, they can be uncoupled. 4009 MachineFunction &MF = DAG.getMachineFunction(); 4010 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4011 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4012 std::vector<MVT::ValueType> Tys; 4013 Tys.push_back(MVT::Other); 4014 std::vector<SDOperand> Ops; 4015 Ops.push_back(Chain); 4016 Ops.push_back(Result); 4017 Ops.push_back(StackSlot); 4018 Ops.push_back(DAG.getValueType(Op.getValueType())); 4019 Ops.push_back(InFlag); 4020 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4021 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 4022 } 4023 4024 return Result; 4025} 4026 4027SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4028 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4029 "Unknown FP_TO_SINT to lower!"); 4030 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4031 // stack slot. 
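  // In the SSE case there is an extra hop below: the value is first stored
  // and reloaded onto the x87 stack with an FLD, since the fistp family only
  // operates on x87 registers, and the integer result is then read back from
  // a second stack slot.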
4032 MachineFunction &MF = DAG.getMachineFunction(); 4033 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4034 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4035 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4036 4037 unsigned Opc; 4038 switch (Op.getValueType()) { 4039 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4040 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4041 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4042 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4043 } 4044 4045 SDOperand Chain = DAG.getEntryNode(); 4046 SDOperand Value = Op.getOperand(0); 4047 if (X86ScalarSSE) { 4048 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4049 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 4050 std::vector<MVT::ValueType> Tys; 4051 Tys.push_back(MVT::f64); 4052 Tys.push_back(MVT::Other); 4053 std::vector<SDOperand> Ops; 4054 Ops.push_back(Chain); 4055 Ops.push_back(StackSlot); 4056 Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType())); 4057 Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size()); 4058 Chain = Value.getValue(1); 4059 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4060 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4061 } 4062 4063 // Build the FP_TO_INT*_IN_MEM 4064 std::vector<SDOperand> Ops; 4065 Ops.push_back(Chain); 4066 Ops.push_back(Value); 4067 Ops.push_back(StackSlot); 4068 SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size()); 4069 4070 // Load the result. 4071 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4072} 4073 4074SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4075 MVT::ValueType VT = Op.getValueType(); 4076 const Type *OpNTy = MVT::getTypeForValueType(VT); 4077 std::vector<Constant*> CV; 4078 if (VT == MVT::f64) { 4079 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)))); 4080 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4081 } else { 4082 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)))); 4083 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4084 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4085 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4086 } 4087 Constant *CS = ConstantStruct::get(CV); 4088 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 4089 std::vector<MVT::ValueType> Tys; 4090 Tys.push_back(VT); 4091 Tys.push_back(MVT::Other); 4092 SmallVector<SDOperand, 3> Ops; 4093 Ops.push_back(DAG.getEntryNode()); 4094 Ops.push_back(CPIdx); 4095 Ops.push_back(DAG.getSrcValue(NULL)); 4096 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 4097 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4098} 4099 4100SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4101 MVT::ValueType VT = Op.getValueType(); 4102 const Type *OpNTy = MVT::getTypeForValueType(VT); 4103 std::vector<Constant*> CV; 4104 if (VT == MVT::f64) { 4105 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63))); 4106 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4107 } else { 4108 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31))); 4109 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4110 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4111 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 4112 } 4113 Constant *CS = ConstantStruct::get(CV); 4114 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 4115 std::vector<MVT::ValueType> Tys; 4116 Tys.push_back(VT); 4117 Tys.push_back(MVT::Other); 4118 
SmallVector<SDOperand, 3> Ops; 4119 Ops.push_back(DAG.getEntryNode()); 4120 Ops.push_back(CPIdx); 4121 Ops.push_back(DAG.getSrcValue(NULL)); 4122 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 4123 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4124} 4125 4126SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG, 4127 SDOperand Chain) { 4128 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4129 SDOperand Cond; 4130 SDOperand Op0 = Op.getOperand(0); 4131 SDOperand Op1 = Op.getOperand(1); 4132 SDOperand CC = Op.getOperand(2); 4133 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4134 const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4135 const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 4136 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4137 unsigned X86CC; 4138 4139 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4140 Op0, Op1, DAG)) { 4141 SDOperand Ops1[] = { Chain, Op0, Op1 }; 4142 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1); 4143 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 4144 return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 4145 } 4146 4147 assert(isFP && "Illegal integer SetCC!"); 4148 4149 SDOperand COps[] = { Chain, Op0, Op1 }; 4150 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1); 4151 4152 switch (SetCCOpcode) { 4153 default: assert(false && "Illegal floating point SetCC!"); 4154 case ISD::SETOEQ: { // !PF & ZF 4155 SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond }; 4156 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 4157 SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8), 4158 Tmp1.getValue(1) }; 4159 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 4160 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4161 } 4162 case ISD::SETUNE: { // PF | !ZF 4163 SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond }; 4164 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 4165 SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8), 4166 Tmp1.getValue(1) }; 4167 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 4168 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4169 } 4170 } 4171} 4172 4173SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4174 bool addTest = true; 4175 SDOperand Chain = DAG.getEntryNode(); 4176 SDOperand Cond = Op.getOperand(0); 4177 SDOperand CC; 4178 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4179 4180 if (Cond.getOpcode() == ISD::SETCC) 4181 Cond = LowerSETCC(Cond, DAG, Chain); 4182 4183 if (Cond.getOpcode() == X86ISD::SETCC) { 4184 CC = Cond.getOperand(0); 4185 4186 // If condition flag is set by a X86ISD::CMP, then make a copy of it 4187 // (since flag operand cannot be shared). Use it as the condition setting 4188 // operand in place of the X86ISD::SETCC. 4189 // If the X86ISD::SETCC has more than one use, then perhaps it's better 4190 // to use a test instead of duplicating the X86ISD::CMP (for register 4191 // pressure reason)? 
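    // Note that without SSE an FP select can only use fcmov for a subset of
    // condition codes (see hasFPCMov); in the remaining cases addTest stays
    // set and the condition is materialized with the compare against zero
    // below.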
4192 SDOperand Cmp = Cond.getOperand(1); 4193 unsigned Opc = Cmp.getOpcode(); 4194 bool IllegalFPCMov = !X86ScalarSSE && 4195 MVT::isFloatingPoint(Op.getValueType()) && 4196 !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4197 if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) && 4198 !IllegalFPCMov) { 4199 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 4200 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 4201 addTest = false; 4202 } 4203 } 4204 4205 if (addTest) { 4206 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4207 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 4208 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 4209 } 4210 4211 VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); 4212 SmallVector<SDOperand, 4> Ops; 4213 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4214 // condition is true. 4215 Ops.push_back(Op.getOperand(2)); 4216 Ops.push_back(Op.getOperand(1)); 4217 Ops.push_back(CC); 4218 Ops.push_back(Cond.getValue(1)); 4219 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4220} 4221 4222SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4223 bool addTest = true; 4224 SDOperand Chain = Op.getOperand(0); 4225 SDOperand Cond = Op.getOperand(1); 4226 SDOperand Dest = Op.getOperand(2); 4227 SDOperand CC; 4228 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4229 4230 if (Cond.getOpcode() == ISD::SETCC) 4231 Cond = LowerSETCC(Cond, DAG, Chain); 4232 4233 if (Cond.getOpcode() == X86ISD::SETCC) { 4234 CC = Cond.getOperand(0); 4235 4236 // If condition flag is set by a X86ISD::CMP, then make a copy of it 4237 // (since flag operand cannot be shared). Use it as the condition setting 4238 // operand in place of the X86ISD::SETCC. 4239 // If the X86ISD::SETCC has more than one use, then perhaps it's better 4240 // to use a test instead of duplicating the X86ISD::CMP (for register 4241 // pressure reason)? 4242 SDOperand Cmp = Cond.getOperand(1); 4243 unsigned Opc = Cmp.getOpcode(); 4244 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) { 4245 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 4246 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 4247 addTest = false; 4248 } 4249 } 4250 4251 if (addTest) { 4252 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4253 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 4254 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 4255 } 4256 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4257 Cond, Op.getOperand(2), CC, Cond.getValue(1)); 4258} 4259 4260SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4261 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4262 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4263 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4264 if (Subtarget->isTargetDarwin()) { 4265 // With PIC, the address is actually $g + Offset. 
4266 if (!Subtarget->is64Bit() && 4267 getTargetMachine().getRelocationModel() == Reloc::PIC_) 4268 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4269 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4270 Result); 4271 } 4272 4273 return Result; 4274} 4275 4276SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 4277 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4278 4279 if (Subtarget->is64Bit()) 4280 return LowerX86_64CCCCallTo(Op, DAG); 4281 else 4282 switch (CallingConv) { 4283 default: 4284 assert(0 && "Unsupported calling convention"); 4285 case CallingConv::Fast: 4286 if (EnableFastCC) { 4287 return LowerFastCCCallTo(Op, DAG, false); 4288 } 4289 // Falls through 4290 case CallingConv::C: 4291 case CallingConv::CSRet: 4292 return LowerCCCCallTo(Op, DAG); 4293 case CallingConv::X86_StdCall: 4294 return LowerStdCallCCCallTo(Op, DAG); 4295 case CallingConv::X86_FastCall: 4296 return LowerFastCCCallTo(Op, DAG, true); 4297 } 4298} 4299 4300SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { 4301 SDOperand Copy; 4302 4303 switch(Op.getNumOperands()) { 4304 default: 4305 assert(0 && "Do not know how to return this many arguments!"); 4306 abort(); 4307 case 1: // ret void. 4308 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0), 4309 DAG.getConstant(getBytesToPopOnReturn(), MVT::i16)); 4310 case 3: { 4311 MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); 4312 4313 if (MVT::isVector(ArgVT) || 4314 (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) { 4315 // Integer or FP vector result -> XMM0. 4316 if (DAG.getMachineFunction().liveout_empty()) 4317 DAG.getMachineFunction().addLiveOut(X86::XMM0); 4318 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1), 4319 SDOperand()); 4320 } else if (MVT::isInteger(ArgVT)) { 4321 // Integer result -> EAX / RAX. 4322 // The C calling convention guarantees the return value has been 4323 // promoted to at least MVT::i32. The X86-64 ABI doesn't require the 4324 // value to be promoted MVT::i64. So we don't have to extend it to 4325 // 64-bit. Return the value in EAX, but mark RAX as liveout. 4326 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 4327 if (DAG.getMachineFunction().liveout_empty()) 4328 DAG.getMachineFunction().addLiveOut(Reg); 4329 4330 Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX; 4331 Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1), 4332 SDOperand()); 4333 } else if (!X86ScalarSSE) { 4334 // FP return with fp-stack value. 4335 if (DAG.getMachineFunction().liveout_empty()) 4336 DAG.getMachineFunction().addLiveOut(X86::ST0); 4337 4338 std::vector<MVT::ValueType> Tys; 4339 Tys.push_back(MVT::Other); 4340 Tys.push_back(MVT::Flag); 4341 std::vector<SDOperand> Ops; 4342 Ops.push_back(Op.getOperand(0)); 4343 Ops.push_back(Op.getOperand(1)); 4344 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size()); 4345 } else { 4346 // FP return with ScalarSSE (return on fp-stack). 4347 if (DAG.getMachineFunction().liveout_empty()) 4348 DAG.getMachineFunction().addLiveOut(X86::ST0); 4349 4350 SDOperand MemLoc; 4351 SDOperand Chain = Op.getOperand(0); 4352 SDOperand Value = Op.getOperand(1); 4353 4354 if (ISD::isNON_EXTLoad(Value.Val) && 4355 (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) { 4356 Chain = Value.getOperand(0); 4357 MemLoc = Value.getOperand(1); 4358 } else { 4359 // Spill the value to memory and reload it into top of stack. 
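        // e.g. with SSE an f64 return value lives in an XMM register, but
        // the calling convention still returns it in ST(0), so it is stored
        // to a fresh stack slot here and FLD'd from it below.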
4360 unsigned Size = MVT::getSizeInBits(ArgVT)/8; 4361 MachineFunction &MF = DAG.getMachineFunction(); 4362 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4363 MemLoc = DAG.getFrameIndex(SSFI, getPointerTy()); 4364 Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0); 4365 } 4366 std::vector<MVT::ValueType> Tys; 4367 Tys.push_back(MVT::f64); 4368 Tys.push_back(MVT::Other); 4369 std::vector<SDOperand> Ops; 4370 Ops.push_back(Chain); 4371 Ops.push_back(MemLoc); 4372 Ops.push_back(DAG.getValueType(ArgVT)); 4373 Copy = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size()); 4374 Tys.clear(); 4375 Tys.push_back(MVT::Other); 4376 Tys.push_back(MVT::Flag); 4377 Ops.clear(); 4378 Ops.push_back(Copy.getValue(1)); 4379 Ops.push_back(Copy); 4380 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size()); 4381 } 4382 break; 4383 } 4384 case 5: { 4385 unsigned Reg1 = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 4386 unsigned Reg2 = Subtarget->is64Bit() ? X86::RDX : X86::EDX; 4387 if (DAG.getMachineFunction().liveout_empty()) { 4388 DAG.getMachineFunction().addLiveOut(Reg1); 4389 DAG.getMachineFunction().addLiveOut(Reg2); 4390 } 4391 4392 Copy = DAG.getCopyToReg(Op.getOperand(0), Reg2, Op.getOperand(3), 4393 SDOperand()); 4394 Copy = DAG.getCopyToReg(Copy, Reg1, Op.getOperand(1), Copy.getValue(1)); 4395 break; 4396 } 4397 } 4398 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, 4399 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16), 4400 Copy.getValue(1)); 4401} 4402 4403SDOperand 4404X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 4405 MachineFunction &MF = DAG.getMachineFunction(); 4406 const Function* Fn = MF.getFunction(); 4407 if (Fn->hasExternalLinkage() && 4408 Subtarget->isTargetCygwin() && 4409 Fn->getName() == "main") 4410 MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true); 4411 4412 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4413 if (Subtarget->is64Bit()) 4414 return LowerX86_64CCCArguments(Op, DAG); 4415 else 4416 switch(CC) { 4417 default: 4418 assert(0 && "Unsupported calling convention"); 4419 case CallingConv::Fast: 4420 if (EnableFastCC) { 4421 return LowerFastCCArguments(Op, DAG); 4422 } 4423 // Falls through 4424 case CallingConv::C: 4425 case CallingConv::CSRet: 4426 return LowerCCCArguments(Op, DAG); 4427 case CallingConv::X86_StdCall: 4428 MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall); 4429 return LowerStdCallCCArguments(Op, DAG); 4430 case CallingConv::X86_FastCall: 4431 MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall); 4432 return LowerFastCallCCArguments(Op, DAG); 4433 } 4434} 4435 4436SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4437 SDOperand InFlag(0, 0); 4438 SDOperand Chain = Op.getOperand(0); 4439 unsigned Align = 4440 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4441 if (Align == 0) Align = 1; 4442 4443 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4444 // If not DWORD aligned, call memset if size is less than the threshold. 4445 // It knows how to align to the right boundary first. 4446 if ((Align & 3) != 0 || 4447 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 4448 MVT::ValueType IntPtr = getPointerTy(); 4449 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4450 std::vector<std::pair<SDOperand, const Type*> > Args; 4451 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy)); 4452 // Extend the ubyte argument to be an int value for the call. 
4453 SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4454 Args.push_back(std::make_pair(Val, IntPtrTy)); 4455 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy)); 4456 std::pair<SDOperand,SDOperand> CallResult = 4457 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false, 4458 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4459 return CallResult.second; 4460 } 4461 4462 MVT::ValueType AVT; 4463 SDOperand Count; 4464 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4465 unsigned BytesLeft = 0; 4466 bool TwoRepStos = false; 4467 if (ValC) { 4468 unsigned ValReg; 4469 uint64_t Val = ValC->getValue() & 255; 4470 4471 // If the value is a constant, then we can potentially use larger sets. 4472 switch (Align & 3) { 4473 case 2: // WORD aligned 4474 AVT = MVT::i16; 4475 ValReg = X86::AX; 4476 Val = (Val << 8) | Val; 4477 break; 4478 case 0: // DWORD aligned 4479 AVT = MVT::i32; 4480 ValReg = X86::EAX; 4481 Val = (Val << 8) | Val; 4482 Val = (Val << 16) | Val; 4483 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 4484 AVT = MVT::i64; 4485 ValReg = X86::RAX; 4486 Val = (Val << 32) | Val; 4487 } 4488 break; 4489 default: // Byte aligned 4490 AVT = MVT::i8; 4491 ValReg = X86::AL; 4492 Count = Op.getOperand(3); 4493 break; 4494 } 4495 4496 if (AVT > MVT::i8) { 4497 if (I) { 4498 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4499 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 4500 BytesLeft = I->getValue() % UBytes; 4501 } else { 4502 assert(AVT >= MVT::i32 && 4503 "Do not use rep;stos if not at least DWORD aligned"); 4504 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4505 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4506 TwoRepStos = true; 4507 } 4508 } 4509 4510 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4511 InFlag); 4512 InFlag = Chain.getValue(1); 4513 } else { 4514 AVT = MVT::i8; 4515 Count = Op.getOperand(3); 4516 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4517 InFlag = Chain.getValue(1); 4518 } 4519 4520 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4521 Count, InFlag); 4522 InFlag = Chain.getValue(1); 4523 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4524 Op.getOperand(1), InFlag); 4525 InFlag = Chain.getValue(1); 4526 4527 std::vector<MVT::ValueType> Tys; 4528 Tys.push_back(MVT::Other); 4529 Tys.push_back(MVT::Flag); 4530 std::vector<SDOperand> Ops; 4531 Ops.push_back(Chain); 4532 Ops.push_back(DAG.getValueType(AVT)); 4533 Ops.push_back(InFlag); 4534 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4535 4536 if (TwoRepStos) { 4537 InFlag = Chain.getValue(1); 4538 Count = Op.getOperand(3); 4539 MVT::ValueType CVT = Count.getValueType(); 4540 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4541 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4542 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4543 Left, InFlag); 4544 InFlag = Chain.getValue(1); 4545 Tys.clear(); 4546 Tys.push_back(MVT::Other); 4547 Tys.push_back(MVT::Flag); 4548 Ops.clear(); 4549 Ops.push_back(Chain); 4550 Ops.push_back(DAG.getValueType(MVT::i8)); 4551 Ops.push_back(InFlag); 4552 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4553 } else if (BytesLeft) { 4554 // Issue stores for the last 1 - 7 bytes. 
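    // e.g. a 10 byte, DWORD aligned memset with the constant byte 0xAB uses
    // rep;stos with AVT == i32, ECX == 2 and EAX == 0xABABABAB (8 bytes),
    // and the remaining BytesLeft == 2 are finished with a single i16 store
    // of 0xABAB at offset 8.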
4555 SDOperand Value; 4556 unsigned Val = ValC->getValue() & 255; 4557 unsigned Offset = I->getValue() - BytesLeft; 4558 SDOperand DstAddr = Op.getOperand(1); 4559 MVT::ValueType AddrVT = DstAddr.getValueType(); 4560 if (BytesLeft >= 4) { 4561 Val = (Val << 8) | Val; 4562 Val = (Val << 16) | Val; 4563 Value = DAG.getConstant(Val, MVT::i32); 4564 Chain = DAG.getStore(Chain, Value, 4565 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4566 DAG.getConstant(Offset, AddrVT)), 4567 NULL, 0); 4568 BytesLeft -= 4; 4569 Offset += 4; 4570 } 4571 if (BytesLeft >= 2) { 4572 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4573 Chain = DAG.getStore(Chain, Value, 4574 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4575 DAG.getConstant(Offset, AddrVT)), 4576 NULL, 0); 4577 BytesLeft -= 2; 4578 Offset += 2; 4579 } 4580 if (BytesLeft == 1) { 4581 Value = DAG.getConstant(Val, MVT::i8); 4582 Chain = DAG.getStore(Chain, Value, 4583 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4584 DAG.getConstant(Offset, AddrVT)), 4585 NULL, 0); 4586 } 4587 } 4588 4589 return Chain; 4590} 4591 4592SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) { 4593 SDOperand Chain = Op.getOperand(0); 4594 unsigned Align = 4595 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4596 if (Align == 0) Align = 1; 4597 4598 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4599 // If not DWORD aligned, call memcpy if size is less than the threshold. 4600 // It knows how to align to the right boundary first. 4601 if ((Align & 3) != 0 || 4602 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 4603 MVT::ValueType IntPtr = getPointerTy(); 4604 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4605 std::vector<std::pair<SDOperand, const Type*> > Args; 4606 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy)); 4607 Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy)); 4608 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy)); 4609 std::pair<SDOperand,SDOperand> CallResult = 4610 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false, 4611 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG); 4612 return CallResult.second; 4613 } 4614 4615 MVT::ValueType AVT; 4616 SDOperand Count; 4617 unsigned BytesLeft = 0; 4618 bool TwoRepMovs = false; 4619 switch (Align & 3) { 4620 case 2: // WORD aligned 4621 AVT = MVT::i16; 4622 break; 4623 case 0: // DWORD aligned 4624 AVT = MVT::i32; 4625 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4626 AVT = MVT::i64; 4627 break; 4628 default: // Byte aligned 4629 AVT = MVT::i8; 4630 Count = Op.getOperand(3); 4631 break; 4632 } 4633 4634 if (AVT > MVT::i8) { 4635 if (I) { 4636 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4637 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 4638 BytesLeft = I->getValue() % UBytes; 4639 } else { 4640 assert(AVT >= MVT::i32 && 4641 "Do not use rep;movs if not at least DWORD aligned"); 4642 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4643 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4644 TwoRepMovs = true; 4645 } 4646 } 4647 4648 SDOperand InFlag(0, 0); 4649 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4650 Count, InFlag); 4651 InFlag = Chain.getValue(1); 4652 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4653 Op.getOperand(1), InFlag); 4654 InFlag = Chain.getValue(1); 4655 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? 
X86::RSI : X86::ESI, 4656 Op.getOperand(2), InFlag); 4657 InFlag = Chain.getValue(1); 4658 4659 std::vector<MVT::ValueType> Tys; 4660 Tys.push_back(MVT::Other); 4661 Tys.push_back(MVT::Flag); 4662 std::vector<SDOperand> Ops; 4663 Ops.push_back(Chain); 4664 Ops.push_back(DAG.getValueType(AVT)); 4665 Ops.push_back(InFlag); 4666 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4667 4668 if (TwoRepMovs) { 4669 InFlag = Chain.getValue(1); 4670 Count = Op.getOperand(3); 4671 MVT::ValueType CVT = Count.getValueType(); 4672 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4673 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4674 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4675 Left, InFlag); 4676 InFlag = Chain.getValue(1); 4677 Tys.clear(); 4678 Tys.push_back(MVT::Other); 4679 Tys.push_back(MVT::Flag); 4680 Ops.clear(); 4681 Ops.push_back(Chain); 4682 Ops.push_back(DAG.getValueType(MVT::i8)); 4683 Ops.push_back(InFlag); 4684 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4685 } else if (BytesLeft) { 4686 // Issue loads and stores for the last 1 - 7 bytes. 4687 unsigned Offset = I->getValue() - BytesLeft; 4688 SDOperand DstAddr = Op.getOperand(1); 4689 MVT::ValueType DstVT = DstAddr.getValueType(); 4690 SDOperand SrcAddr = Op.getOperand(2); 4691 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4692 SDOperand Value; 4693 if (BytesLeft >= 4) { 4694 Value = DAG.getLoad(MVT::i32, Chain, 4695 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4696 DAG.getConstant(Offset, SrcVT)), 4697 NULL, 0); 4698 Chain = Value.getValue(1); 4699 Chain = DAG.getStore(Chain, Value, 4700 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4701 DAG.getConstant(Offset, DstVT)), 4702 NULL, 0); 4703 BytesLeft -= 4; 4704 Offset += 4; 4705 } 4706 if (BytesLeft >= 2) { 4707 Value = DAG.getLoad(MVT::i16, Chain, 4708 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4709 DAG.getConstant(Offset, SrcVT)), 4710 NULL, 0); 4711 Chain = Value.getValue(1); 4712 Chain = DAG.getStore(Chain, Value, 4713 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4714 DAG.getConstant(Offset, DstVT)), 4715 NULL, 0); 4716 BytesLeft -= 2; 4717 Offset += 2; 4718 } 4719 4720 if (BytesLeft == 1) { 4721 Value = DAG.getLoad(MVT::i8, Chain, 4722 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4723 DAG.getConstant(Offset, SrcVT)), 4724 NULL, 0); 4725 Chain = Value.getValue(1); 4726 Chain = DAG.getStore(Chain, Value, 4727 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4728 DAG.getConstant(Offset, DstVT)), 4729 NULL, 0); 4730 } 4731 } 4732 4733 return Chain; 4734} 4735 4736SDOperand 4737X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) { 4738 std::vector<MVT::ValueType> Tys; 4739 Tys.push_back(MVT::Other); 4740 Tys.push_back(MVT::Flag); 4741 std::vector<SDOperand> Ops; 4742 Ops.push_back(Op.getOperand(0)); 4743 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &Ops[0], Ops.size()); 4744 Ops.clear(); 4745 if (Subtarget->is64Bit()) { 4746 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4747 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX, 4748 MVT::i64, Copy1.getValue(2)); 4749 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2, 4750 DAG.getConstant(32, MVT::i8)); 4751 Ops.push_back(DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp)); 4752 Ops.push_back(Copy2.getValue(1)); 4753 Tys[0] = MVT::i64; 4754 Tys[1] = MVT::Other; 4755 } else { 4756 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4757 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX, 4758 MVT::i32, 
Copy1.getValue(2)); 4759 Ops.push_back(Copy1); 4760 Ops.push_back(Copy2); 4761 Ops.push_back(Copy2.getValue(1)); 4762 Tys[0] = Tys[1] = MVT::i32; 4763 Tys.push_back(MVT::Other); 4764 } 4765 return DAG.getNode(ISD::MERGE_VALUES, Tys, &Ops[0], Ops.size()); 4766} 4767 4768SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4769 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 4770 4771 if (!Subtarget->is64Bit()) { 4772 // vastart just stores the address of the VarArgsFrameIndex slot into the 4773 // memory location argument. 4774 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4775 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 4776 SV->getOffset()); 4777 } 4778 4779 // __va_list_tag: 4780 // gp_offset (0 - 6 * 8) 4781 // fp_offset (48 - 48 + 8 * 16) 4782 // overflow_arg_area (point to parameters coming in memory). 4783 // reg_save_area 4784 std::vector<SDOperand> MemOps; 4785 SDOperand FIN = Op.getOperand(1); 4786 // Store gp_offset 4787 SDOperand Store = DAG.getStore(Op.getOperand(0), 4788 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4789 FIN, SV->getValue(), SV->getOffset()); 4790 MemOps.push_back(Store); 4791 4792 // Store fp_offset 4793 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4794 DAG.getConstant(4, getPointerTy())); 4795 Store = DAG.getStore(Op.getOperand(0), 4796 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4797 FIN, SV->getValue(), SV->getOffset()); 4798 MemOps.push_back(Store); 4799 4800 // Store ptr to overflow_arg_area 4801 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4802 DAG.getConstant(4, getPointerTy())); 4803 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4804 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 4805 SV->getOffset()); 4806 MemOps.push_back(Store); 4807 4808 // Store ptr to reg_save_area. 4809 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4810 DAG.getConstant(8, getPointerTy())); 4811 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4812 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 4813 SV->getOffset()); 4814 MemOps.push_back(Store); 4815 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4816} 4817 4818SDOperand 4819X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4820 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4821 switch (IntNo) { 4822 default: return SDOperand(); // Don't custom lower most intrinsics. 4823 // Comparison intrinsics. 
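  // Each of the SSE/SSE2 (u)comi{eq,lt,le,gt,ge,neq} scalar-compare
  // intrinsics below is lowered to an X86ISD::COMI or X86ISD::UCOMI node
  // producing a flag result, followed by an X86ISD::SETCC that materializes
  // the requested predicate as an i8, which is then any-extended to the i32
  // value the intrinsic returns.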
4824 case Intrinsic::x86_sse_comieq_ss: 4825 case Intrinsic::x86_sse_comilt_ss: 4826 case Intrinsic::x86_sse_comile_ss: 4827 case Intrinsic::x86_sse_comigt_ss: 4828 case Intrinsic::x86_sse_comige_ss: 4829 case Intrinsic::x86_sse_comineq_ss: 4830 case Intrinsic::x86_sse_ucomieq_ss: 4831 case Intrinsic::x86_sse_ucomilt_ss: 4832 case Intrinsic::x86_sse_ucomile_ss: 4833 case Intrinsic::x86_sse_ucomigt_ss: 4834 case Intrinsic::x86_sse_ucomige_ss: 4835 case Intrinsic::x86_sse_ucomineq_ss: 4836 case Intrinsic::x86_sse2_comieq_sd: 4837 case Intrinsic::x86_sse2_comilt_sd: 4838 case Intrinsic::x86_sse2_comile_sd: 4839 case Intrinsic::x86_sse2_comigt_sd: 4840 case Intrinsic::x86_sse2_comige_sd: 4841 case Intrinsic::x86_sse2_comineq_sd: 4842 case Intrinsic::x86_sse2_ucomieq_sd: 4843 case Intrinsic::x86_sse2_ucomilt_sd: 4844 case Intrinsic::x86_sse2_ucomile_sd: 4845 case Intrinsic::x86_sse2_ucomigt_sd: 4846 case Intrinsic::x86_sse2_ucomige_sd: 4847 case Intrinsic::x86_sse2_ucomineq_sd: { 4848 unsigned Opc = 0; 4849 ISD::CondCode CC = ISD::SETCC_INVALID; 4850 switch (IntNo) { 4851 default: break; 4852 case Intrinsic::x86_sse_comieq_ss: 4853 case Intrinsic::x86_sse2_comieq_sd: 4854 Opc = X86ISD::COMI; 4855 CC = ISD::SETEQ; 4856 break; 4857 case Intrinsic::x86_sse_comilt_ss: 4858 case Intrinsic::x86_sse2_comilt_sd: 4859 Opc = X86ISD::COMI; 4860 CC = ISD::SETLT; 4861 break; 4862 case Intrinsic::x86_sse_comile_ss: 4863 case Intrinsic::x86_sse2_comile_sd: 4864 Opc = X86ISD::COMI; 4865 CC = ISD::SETLE; 4866 break; 4867 case Intrinsic::x86_sse_comigt_ss: 4868 case Intrinsic::x86_sse2_comigt_sd: 4869 Opc = X86ISD::COMI; 4870 CC = ISD::SETGT; 4871 break; 4872 case Intrinsic::x86_sse_comige_ss: 4873 case Intrinsic::x86_sse2_comige_sd: 4874 Opc = X86ISD::COMI; 4875 CC = ISD::SETGE; 4876 break; 4877 case Intrinsic::x86_sse_comineq_ss: 4878 case Intrinsic::x86_sse2_comineq_sd: 4879 Opc = X86ISD::COMI; 4880 CC = ISD::SETNE; 4881 break; 4882 case Intrinsic::x86_sse_ucomieq_ss: 4883 case Intrinsic::x86_sse2_ucomieq_sd: 4884 Opc = X86ISD::UCOMI; 4885 CC = ISD::SETEQ; 4886 break; 4887 case Intrinsic::x86_sse_ucomilt_ss: 4888 case Intrinsic::x86_sse2_ucomilt_sd: 4889 Opc = X86ISD::UCOMI; 4890 CC = ISD::SETLT; 4891 break; 4892 case Intrinsic::x86_sse_ucomile_ss: 4893 case Intrinsic::x86_sse2_ucomile_sd: 4894 Opc = X86ISD::UCOMI; 4895 CC = ISD::SETLE; 4896 break; 4897 case Intrinsic::x86_sse_ucomigt_ss: 4898 case Intrinsic::x86_sse2_ucomigt_sd: 4899 Opc = X86ISD::UCOMI; 4900 CC = ISD::SETGT; 4901 break; 4902 case Intrinsic::x86_sse_ucomige_ss: 4903 case Intrinsic::x86_sse2_ucomige_sd: 4904 Opc = X86ISD::UCOMI; 4905 CC = ISD::SETGE; 4906 break; 4907 case Intrinsic::x86_sse_ucomineq_ss: 4908 case Intrinsic::x86_sse2_ucomineq_sd: 4909 Opc = X86ISD::UCOMI; 4910 CC = ISD::SETNE; 4911 break; 4912 } 4913 4914 unsigned X86CC; 4915 SDOperand LHS = Op.getOperand(1); 4916 SDOperand RHS = Op.getOperand(2); 4917 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4918 4919 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4920 SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS }; 4921 SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3); 4922 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 4923 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 4924 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2); 4925 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4926 } 4927 } 4928} 4929 4930/// LowerOperation - Provide custom lowering hooks for some operations. 
4931/// 4932SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4933 switch (Op.getOpcode()) { 4934 default: assert(0 && "Should not custom lower this!"); 4935 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4936 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4937 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4938 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4939 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4940 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4941 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4942 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4943 case ISD::SHL_PARTS: 4944 case ISD::SRA_PARTS: 4945 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4946 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4947 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4948 case ISD::FABS: return LowerFABS(Op, DAG); 4949 case ISD::FNEG: return LowerFNEG(Op, DAG); 4950 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode()); 4951 case ISD::SELECT: return LowerSELECT(Op, DAG); 4952 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4953 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4954 case ISD::CALL: return LowerCALL(Op, DAG); 4955 case ISD::RET: return LowerRET(Op, DAG); 4956 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4957 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4958 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4959 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4960 case ISD::VASTART: return LowerVASTART(Op, DAG); 4961 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4962 } 4963} 4964 4965const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4966 switch (Opcode) { 4967 default: return NULL; 4968 case X86ISD::SHLD: return "X86ISD::SHLD"; 4969 case X86ISD::SHRD: return "X86ISD::SHRD"; 4970 case X86ISD::FAND: return "X86ISD::FAND"; 4971 case X86ISD::FXOR: return "X86ISD::FXOR"; 4972 case X86ISD::FILD: return "X86ISD::FILD"; 4973 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 4974 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 4975 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 4976 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 4977 case X86ISD::FLD: return "X86ISD::FLD"; 4978 case X86ISD::FST: return "X86ISD::FST"; 4979 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 4980 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 4981 case X86ISD::CALL: return "X86ISD::CALL"; 4982 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 4983 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 4984 case X86ISD::CMP: return "X86ISD::CMP"; 4985 case X86ISD::COMI: return "X86ISD::COMI"; 4986 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 4987 case X86ISD::SETCC: return "X86ISD::SETCC"; 4988 case X86ISD::CMOV: return "X86ISD::CMOV"; 4989 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 4990 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 4991 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 4992 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 4993 case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK"; 4994 case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA"; 4995 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 4996 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 4997 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 4998 case 
X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 4999 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5000 case X86ISD::FMAX: return "X86ISD::FMAX"; 5001 case X86ISD::FMIN: return "X86ISD::FMIN"; 5002 } 5003} 5004 5005/// isLegalAddressImmediate - Return true if the integer value or 5006/// GlobalValue can be used as the offset of the target addressing mode. 5007bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const { 5008 // X86 allows a sign-extended 32-bit immediate field. 5009 return (V > -(1LL << 32) && V < (1LL << 32)-1); 5010} 5011 5012bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const { 5013 // In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement 5014 // field unless we are in small code model. 5015 if (Subtarget->is64Bit() && 5016 getTargetMachine().getCodeModel() != CodeModel::Small) 5017 return false; 5018 5019 return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)); 5020} 5021 5022/// isShuffleMaskLegal - Targets can use this to indicate that they only 5023/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5024/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5025/// are assumed to be legal. 5026bool 5027X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5028 // Only do shuffles on 128-bit vector types for now. 5029 if (MVT::getSizeInBits(VT) == 64) return false; 5030 return (Mask.Val->getNumOperands() <= 4 || 5031 isSplatMask(Mask.Val) || 5032 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5033 X86::isUNPCKLMask(Mask.Val) || 5034 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5035 X86::isUNPCKHMask(Mask.Val)); 5036} 5037 5038bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5039 MVT::ValueType EVT, 5040 SelectionDAG &DAG) const { 5041 unsigned NumElts = BVOps.size(); 5042 // Only do shuffles on 128-bit vector types for now. 5043 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5044 if (NumElts == 2) return true; 5045 if (NumElts == 4) { 5046 return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) || 5047 isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps)); 5048 } 5049 return false; 5050} 5051 5052//===----------------------------------------------------------------------===// 5053// X86 Scheduler Hooks 5054//===----------------------------------------------------------------------===// 5055 5056MachineBasicBlock * 5057X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 5058 MachineBasicBlock *BB) { 5059 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5060 switch (MI->getOpcode()) { 5061 default: assert(false && "Unexpected instr type to insert"); 5062 case X86::CMOV_FR32: 5063 case X86::CMOV_FR64: 5064 case X86::CMOV_V4F32: 5065 case X86::CMOV_V2F64: 5066 case X86::CMOV_V2I64: { 5067 // To "insert" a SELECT_CC instruction, we actually have to insert the 5068 // diamond control-flow pattern. The incoming instruction knows the 5069 // destination vreg to set, the condition code register to branch on, the 5070 // true/false values to select between, and a branch opcode to use. 5071 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5072 ilist<MachineBasicBlock>::iterator It = BB; 5073 ++It; 5074 5075 // thisMBB: 5076 // ... 5077 // TrueVal = ... 
5078 // cmpTY ccX, r1, r2 5079 // bCC copy1MBB 5080 // fallthrough --> copy0MBB 5081 MachineBasicBlock *thisMBB = BB; 5082 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5083 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5084 unsigned Opc = 5085 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5086 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5087 MachineFunction *F = BB->getParent(); 5088 F->getBasicBlockList().insert(It, copy0MBB); 5089 F->getBasicBlockList().insert(It, sinkMBB); 5090 // Update machine-CFG edges by first adding all successors of the current 5091 // block to the new block which will contain the Phi node for the select. 5092 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5093 e = BB->succ_end(); i != e; ++i) 5094 sinkMBB->addSuccessor(*i); 5095 // Next, remove all successors of the current block, and add the true 5096 // and fallthrough blocks as its successors. 5097 while(!BB->succ_empty()) 5098 BB->removeSuccessor(BB->succ_begin()); 5099 BB->addSuccessor(copy0MBB); 5100 BB->addSuccessor(sinkMBB); 5101 5102 // copy0MBB: 5103 // %FalseValue = ... 5104 // # fallthrough to sinkMBB 5105 BB = copy0MBB; 5106 5107 // Update machine-CFG edges 5108 BB->addSuccessor(sinkMBB); 5109 5110 // sinkMBB: 5111 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5112 // ... 5113 BB = sinkMBB; 5114 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5115 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5116 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5117 5118 delete MI; // The pseudo instruction is gone now. 5119 return BB; 5120 } 5121 5122 case X86::FP_TO_INT16_IN_MEM: 5123 case X86::FP_TO_INT32_IN_MEM: 5124 case X86::FP_TO_INT64_IN_MEM: { 5125 // Change the floating point control register to use "round towards zero" 5126 // mode when truncating to an integer value. 5127 MachineFunction *F = BB->getParent(); 5128 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5129 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5130 5131 // Load the old value of the high byte of the control word... 5132 unsigned OldCW = 5133 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass); 5134 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5135 5136 // Set the high part to be round to zero... 5137 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5138 .addImm(0xC7F); 5139 5140 // Reload the modified control word now... 5141 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5142 5143 // Restore the memory image of control word to original value 5144 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5145 .addReg(OldCW); 5146 5147 // Get the X86 opcode to use. 
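    // With the truncating (round-toward-zero) control word now in effect,
    // the FpIST*m pseudo selected below stores the f.p. value to memory as a
    // 16-, 32-, or 64-bit integer (a fistp-style store), which implements the
    // FP_TO_SINT semantics of the FP_TO_INT*_IN_MEM pseudo.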
5148 unsigned Opc; 5149 switch (MI->getOpcode()) { 5150 default: assert(0 && "illegal opcode!"); 5151 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break; 5152 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break; 5153 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break; 5154 } 5155 5156 X86AddressMode AM; 5157 MachineOperand &Op = MI->getOperand(0); 5158 if (Op.isRegister()) { 5159 AM.BaseType = X86AddressMode::RegBase; 5160 AM.Base.Reg = Op.getReg(); 5161 } else { 5162 AM.BaseType = X86AddressMode::FrameIndexBase; 5163 AM.Base.FrameIndex = Op.getFrameIndex(); 5164 } 5165 Op = MI->getOperand(1); 5166 if (Op.isImmediate()) 5167 AM.Scale = Op.getImm(); 5168 Op = MI->getOperand(2); 5169 if (Op.isImmediate()) 5170 AM.IndexReg = Op.getImm(); 5171 Op = MI->getOperand(3); 5172 if (Op.isGlobalAddress()) { 5173 AM.GV = Op.getGlobal(); 5174 } else { 5175 AM.Disp = Op.getImm(); 5176 } 5177 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5178 .addReg(MI->getOperand(4).getReg()); 5179 5180 // Reload the original control word now. 5181 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5182 5183 delete MI; // The pseudo instruction is gone now. 5184 return BB; 5185 } 5186 } 5187} 5188 5189//===----------------------------------------------------------------------===// 5190// X86 Optimization Hooks 5191//===----------------------------------------------------------------------===// 5192 5193void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5194 uint64_t Mask, 5195 uint64_t &KnownZero, 5196 uint64_t &KnownOne, 5197 unsigned Depth) const { 5198 unsigned Opc = Op.getOpcode(); 5199 assert((Opc >= ISD::BUILTIN_OP_END || 5200 Opc == ISD::INTRINSIC_WO_CHAIN || 5201 Opc == ISD::INTRINSIC_W_CHAIN || 5202 Opc == ISD::INTRINSIC_VOID) && 5203 "Should use MaskedValueIsZero if you don't know whether Op" 5204 " is a target node!"); 5205 5206 KnownZero = KnownOne = 0; // Don't know anything. 5207 switch (Opc) { 5208 default: break; 5209 case X86ISD::SETCC: 5210 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5211 break; 5212 } 5213} 5214 5215/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5216/// element of the result of the vector shuffle. 5217static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5218 MVT::ValueType VT = N->getValueType(0); 5219 SDOperand PermMask = N->getOperand(2); 5220 unsigned NumElems = PermMask.getNumOperands(); 5221 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5222 i %= NumElems; 5223 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5224 return (i == 0) 5225 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT)); 5226 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5227 SDOperand Idx = PermMask.getOperand(i); 5228 if (Idx.getOpcode() == ISD::UNDEF) 5229 return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT)); 5230 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5231 } 5232 return SDOperand(); 5233} 5234 5235/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5236/// node is a GlobalAddress + an offset. 
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV = NULL;
  // isGAPlusOffset only adds to Offset, so it must start out at zero.
  int64_t Offset = 0;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}


/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
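///
/// Illustrative example (not from the source): a shuffle of
/// (build_vector (load p), (load p+4), (load p+8), (load p+12)) with mask
/// <0, 1, 2, 3> becomes a single 128-bit load of p when the base is known to
/// be 16-byte aligned, and an X86ISD::LOAD_UA (movups) otherwise.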
5320static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5321 const X86Subtarget *Subtarget) { 5322 MachineFunction &MF = DAG.getMachineFunction(); 5323 MachineFrameInfo *MFI = MF.getFrameInfo(); 5324 MVT::ValueType VT = N->getValueType(0); 5325 MVT::ValueType EVT = MVT::getVectorBaseType(VT); 5326 SDOperand PermMask = N->getOperand(2); 5327 int NumElems = (int)PermMask.getNumOperands(); 5328 SDNode *Base = NULL; 5329 for (int i = 0; i < NumElems; ++i) { 5330 SDOperand Idx = PermMask.getOperand(i); 5331 if (Idx.getOpcode() == ISD::UNDEF) { 5332 if (!Base) return SDOperand(); 5333 } else { 5334 SDOperand Arg = 5335 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5336 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5337 return SDOperand(); 5338 if (!Base) 5339 Base = Arg.Val; 5340 else if (!isConsecutiveLoad(Arg.Val, Base, 5341 i, MVT::getSizeInBits(EVT)/8,MFI)) 5342 return SDOperand(); 5343 } 5344 } 5345 5346 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5347 if (isAlign16) { 5348 LoadSDNode *LD = cast<LoadSDNode>(Base); 5349 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5350 LD->getSrcValueOffset()); 5351 } else { 5352 // Just use movups, it's shorter. 5353 std::vector<MVT::ValueType> Tys; 5354 Tys.push_back(MVT::v4f32); 5355 Tys.push_back(MVT::Other); 5356 SmallVector<SDOperand, 3> Ops; 5357 Ops.push_back(Base->getOperand(0)); 5358 Ops.push_back(Base->getOperand(1)); 5359 Ops.push_back(Base->getOperand(2)); 5360 return DAG.getNode(ISD::BIT_CONVERT, VT, 5361 DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size())); 5362 } 5363} 5364 5365/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5366static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5367 const X86Subtarget *Subtarget) { 5368 SDOperand Cond = N->getOperand(0); 5369 5370 // If we have SSE[12] support, try to form min/max nodes. 5371 if (Subtarget->hasSSE2() && 5372 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5373 if (Cond.getOpcode() == ISD::SETCC) { 5374 // Get the LHS/RHS of the select. 5375 SDOperand LHS = N->getOperand(1); 5376 SDOperand RHS = N->getOperand(2); 5377 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5378 5379 unsigned Opcode = 0; 5380 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5381 switch (CC) { 5382 default: break; 5383 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5384 case ISD::SETULE: 5385 case ISD::SETLE: 5386 if (!UnsafeFPMath) break; 5387 // FALL THROUGH. 5388 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5389 case ISD::SETLT: 5390 Opcode = X86ISD::FMIN; 5391 break; 5392 5393 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5394 case ISD::SETUGT: 5395 case ISD::SETGT: 5396 if (!UnsafeFPMath) break; 5397 // FALL THROUGH. 5398 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5399 case ISD::SETGE: 5400 Opcode = X86ISD::FMAX; 5401 break; 5402 } 5403 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5404 switch (CC) { 5405 default: break; 5406 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5407 case ISD::SETUGT: 5408 case ISD::SETGT: 5409 if (!UnsafeFPMath) break; 5410 // FALL THROUGH. 5411 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5412 case ISD::SETGE: 5413 Opcode = X86ISD::FMIN; 5414 break; 5415 5416 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5417 case ISD::SETULE: 5418 case ISD::SETLE: 5419 if (!UnsafeFPMath) break; 5420 // FALL THROUGH. 5421 case ISD::SETOLT: // (X olt/lt Y) ? 
Y : X -> max 5422 case ISD::SETLT: 5423 Opcode = X86ISD::FMAX; 5424 break; 5425 } 5426 } 5427 5428 if (Opcode) 5429 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5430 } 5431 5432 } 5433 5434 return SDOperand(); 5435} 5436 5437 5438SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5439 DAGCombinerInfo &DCI) const { 5440 SelectionDAG &DAG = DCI.DAG; 5441 switch (N->getOpcode()) { 5442 default: break; 5443 case ISD::VECTOR_SHUFFLE: 5444 return PerformShuffleCombine(N, DAG, Subtarget); 5445 case ISD::SELECT: 5446 return PerformSELECTCombine(N, DAG, Subtarget); 5447 } 5448 5449 return SDOperand(); 5450} 5451 5452//===----------------------------------------------------------------------===// 5453// X86 Inline Assembly Support 5454//===----------------------------------------------------------------------===// 5455 5456/// getConstraintType - Given a constraint letter, return the type of 5457/// constraint it is for this target. 5458X86TargetLowering::ConstraintType 5459X86TargetLowering::getConstraintType(char ConstraintLetter) const { 5460 switch (ConstraintLetter) { 5461 case 'A': 5462 case 'r': 5463 case 'R': 5464 case 'l': 5465 case 'q': 5466 case 'Q': 5467 case 'x': 5468 case 'Y': 5469 return C_RegisterClass; 5470 default: return TargetLowering::getConstraintType(ConstraintLetter); 5471 } 5472} 5473 5474/// isOperandValidForConstraint - Return the specified operand (possibly 5475/// modified) if the specified SDOperand is valid for the specified target 5476/// constraint letter, otherwise return null. 5477SDOperand X86TargetLowering:: 5478isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) { 5479 switch (Constraint) { 5480 default: break; 5481 case 'i': 5482 // Literal immediates are always ok. 5483 if (isa<ConstantSDNode>(Op)) return Op; 5484 5485 // If we are in non-pic codegen mode, we allow the address of a global to 5486 // be used with 'i'. 5487 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5488 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 5489 return SDOperand(0, 0); 5490 5491 if (GA->getOpcode() != ISD::TargetGlobalAddress) 5492 Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), 5493 GA->getOffset()); 5494 return Op; 5495 } 5496 5497 // Otherwise, not valid for this mode. 5498 return SDOperand(0, 0); 5499 } 5500 return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG); 5501} 5502 5503 5504std::vector<unsigned> X86TargetLowering:: 5505getRegClassForInlineAsmConstraint(const std::string &Constraint, 5506 MVT::ValueType VT) const { 5507 if (Constraint.size() == 1) { 5508 // FIXME: not handling fp-stack yet! 5509 // FIXME: not handling MMX registers yet ('y' constraint). 
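    // Summary of the GCC constraint letters handled below (assuming their
    // standard x86 meanings): 'A' = the EAX/EDX pair, 'r'/'R' = general
    // registers, 'l' = index registers (no ESP), 'q'/'Q' = registers with
    // 8-bit subregisters (EAX/EBX/ECX/EDX), 'x' = XMM registers if SSE1 is
    // available, 'Y' = XMM registers if SSE2 is available.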
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
                                     X86::RSI, X86::RDI, X86::RBP, X86::RSP,
                                     X86::R8,  X86::R9,  X86::R10, X86::R11,
                                     X86::R12, X86::R13, X86::R14, X86::R15, 0);
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, X86::SP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}