X86ISelLowering.cpp revision 7e22977c0fab3bc46745c8aa4155db6c77663a58
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

// FIXME: temporary.
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  // Set up the TargetLowering object.

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // The MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Add legal addressing mode scale values.
  addLegalAddressScale(8);
  addLegalAddressScale(4);
  addLegalAddressScale(2);
  // Enter the ones which require both scale + index last. These are more
  // expensive.
  addLegalAddressScale(9);
  addLegalAddressScale(5);
  addLegalAddressScale(3);

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
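
  // Quick reference for the legalization actions used throughout this
  // constructor (a simplified summary, not the authoritative definition):
  //   Legal   - the target supports the node natively; nothing to do.
  //   Promote - legalize by performing the operation in a larger type,
  //             e.g. an i16 UINT_TO_FP below is done as an i32 conversion.
  //   Expand  - let the target-independent legalizer break the node into
  //             simpler nodes (or a libcall).
  //   Custom  - this file lowers the node itself in LowerOperation.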

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  if (!Subtarget->is64Bit()) {
    // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
    setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }
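
  // Illustrative consequence of the FP_TO_UINT settings above: on a 32-bit
  // target, converting f64 -> u16 is performed as an f64 -> i32 FP_TO_SINT
  // and then truncated, since a signed conversion with a wider destination
  // covers the whole unsigned i16 range.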

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSE) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Expand);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // The X86 ret instruction may pop the stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
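
  // Sketch of what the Custom *_PARTS lowering above enables (illustrative):
  // a 64-bit 'x << n' on 32-bit x86 can be emitted as an shld/shl pair plus
  // a check of bit 5 of the shift amount, rather than a runtime library call.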

  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY           , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE        , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE     , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);

  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }
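
  // Example of the FP-immediate handling above (illustrative): with x87,
  // 'return 1.0;' can be materialized directly with FLD1, while under
  // scalar SSE only +0.0 is free (xorps/xorpd) and every other constant
  // becomes a constant-pool load because ISD::ConstantFP is Expand.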

  // First set the operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetic.
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
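
// Examples of the return-register assignment implemented below
// (illustrative, assuming the conventions in this file):
//   i32                  -> EAX
//   i64 on x86-64        -> RAX
//   two 32-bit results   -> EAX + EDX
//   f32/f64              -> ST0 on x86-32; XMM0 on x86-64 or with
//                           fastcc + SSE2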

/// GetRetValueLocs - If we are returning a set of values with the specified
/// value types, determine the set of registers each one will land in. This
/// sets one element of the ResultRegs array for each element in the VTs array.
static void GetRetValueLocs(const MVT::ValueType *VTs, unsigned NumVTs,
                            unsigned *ResultRegs,
                            const X86Subtarget *Subtarget,
                            unsigned CC) {
  if (NumVTs == 0) return;

  if (NumVTs == 2) {
    ResultRegs[0] = VTs[0] == MVT::i64 ? X86::RAX : X86::EAX;
    ResultRegs[1] = VTs[1] == MVT::i64 ? X86::RDX : X86::EDX;
    return;
  }

  // Otherwise, NumVTs is 1.
  MVT::ValueType ArgVT = VTs[0];

  unsigned Reg;
  switch (ArgVT) {
  case MVT::i8:  Reg = X86::AL;  break;
  case MVT::i16: Reg = X86::AX;  break;
  case MVT::i32: Reg = X86::EAX; break;
  case MVT::i64: Reg = X86::RAX; break;
  case MVT::f32:
  case MVT::f64:
    if (Subtarget->is64Bit())
      Reg = X86::XMM0;  // FP values in X86-64 go in XMM0.
    else if (CC == CallingConv::Fast && Subtarget->hasSSE2())
      Reg = X86::XMM0;  // FP values in X86-32 with fastcc go in XMM0.
    else
      Reg = X86::ST0;   // FP values in X86-32 go in ST0.
    break;
  default:
    assert(MVT::isVector(ArgVT) && "Unknown return value type!");
    Reg = X86::XMM0;    // Int/FP vector result -> XMM0.
    break;
  }
  ResultRegs[0] = Reg;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  // Support returning up to two registers.
  MVT::ValueType VTs[2];
  unsigned DestRegs[2];
  unsigned NumRegs = Op.getNumOperands() / 2;
  assert(NumRegs <= 2 && "Can only return up to two regs!");

  for (unsigned i = 0; i != NumRegs; ++i)
    VTs[i] = Op.getOperand(i*2+1).getValueType();

  // Determine which register each value should be copied into.
  GetRetValueLocs(VTs, NumRegs, DestRegs, Subtarget,
                  DAG.getMachineFunction().getFunction()->getCallingConv());

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().liveout_empty()) {
    for (unsigned i = 0; i != NumRegs; ++i)
      DAG.getMachineFunction().addLiveOut(DestRegs[i]);
  }

  SDOperand Chain = Op.getOperand(0);
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (NumRegs != 1 || DestRegs[0] != X86::ST0) {
    for (unsigned i = 0; i != NumRegs; ++i) {
      Chain = DAG.getCopyToReg(Chain, DestRegs[i], Op.getOperand(i*2+1), Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.
    if (X86ScalarSSE) {
      SDOperand MemLoc;

      // If this is a load into a scalar-sse value, don't store the loaded
      // value back to the stack, only to reload it: just replace the
      // scalar-sse load.
      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain  = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into top of stack.
        unsigned Size = MVT::getSizeInBits(VTs[0])/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
      SDOperand Ops[] = { Chain, MemLoc, DAG.getValueType(VTs[0]) };
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
      Chain = Value.getValue(1);
    }

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}
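
// Sketch of the fp-return data flow handled in LowerRET above and in
// LowerCallResult below (illustrative, not normative):
//   returning (SSE regs):  XMM -> store to stack slot -> X86ISD::FLD
//                          -> FP_SET_RESULT (value ends up in ST(0))
//   receiving (SSE regs):  FP_GET_RESULT (from ST(0)) -> X86ISD::FST to a
//                          stack slot -> normal load back into an XMM reg
// The extra store/load pairs exist because ST(0) is not a normal register
// before the fp stackifier runs.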

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;

  // We support returning up to two registers.
  MVT::ValueType VTs[2];
  unsigned DestRegs[2];
  unsigned NumRegs = TheCall->getNumValues() - 1;
  assert(NumRegs <= 2 && "Can only return up to two regs!");

  for (unsigned i = 0; i != NumRegs; ++i)
    VTs[i] = TheCall->getValueType(i);

  // Determine which register each value should be copied into.
  GetRetValueLocs(VTs, NumRegs, DestRegs, Subtarget, CallingConv);

  // Copy all of the result registers out of their specified physreg.
  if (NumRegs != 1 || DestRegs[0] != X86::ST0) {
    for (unsigned i = 0; i != NumRegs; ++i) {
      Chain = DAG.getCopyFromReg(Chain, DestRegs[i], VTs[i],
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.
    SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
    // an XMM register.
    if (X86ScalarSSE) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When the stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      SDOperand Ops[] = {
        Chain, RetVal, StackSlot, DAG.getValueType(VTs[0]), InFlag
      };
      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
      RetVal = DAG.getLoad(VTs[0], Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }

    if (VTs[0] == MVT::f32 && !X86ScalarSSE)
      // FIXME: we would really like to remember that this FP_ROUND
      // operation is okay to eliminate if we allow excess FP precision.
      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention seems to be standard for many Windows APIs.
//  It differs from the C calling convention just a little: the callee should
//  clean up the stack, not the caller. Symbols should also be decorated in
//  some fancy way :) It doesn't support any vector arguments.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}
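
// For example (hypothetical usage): to expose an incoming i32 argument
// arriving in ECX as an SSA value, a lowering routine would write
//   unsigned VReg = AddLiveIn(MF, X86::ECX, X86::GR32RegisterClass);
//   SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i32);
// which is exactly the pattern the argument-lowering code below follows.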

/// HowToPassCallArgument - Returns how a formal argument of the specified type
/// should be passed. If it is passed on the stack, returns the size of the
/// stack slot; if it is passed in integer or XMM registers, returns the number
/// of integer or XMM registers needed.
static void
HowToPassCallArgument(MVT::ValueType ObjectVT,
                      bool ArgInReg,
                      unsigned NumIntRegs, unsigned NumXMMRegs,
                      unsigned MaxNumIntRegs,
                      unsigned &ObjSize, unsigned &ObjIntRegs,
                      unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  if (MaxNumIntRegs > 3) {
    // We don't have that many registers on ia32! :)
    MaxNumIntRegs = 3;
  }

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
    if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
      ObjIntRegs = 1;
    else
      ObjSize = 1;
    break;
  case MVT::i16:
    if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
      ObjIntRegs = 1;
    else
      ObjSize = 2;
    break;
  case MVT::i32:
    if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
      ObjIntRegs = 1;
    else
      ObjSize = 4;
    break;
  case MVT::i64:
    if (ArgInReg && (NumIntRegs+2 <= MaxNumIntRegs)) {
      ObjIntRegs = 2;
    } else if (ArgInReg && (NumIntRegs+1 <= MaxNumIntRegs)) {
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
      ObjSize = 8;
    break;
  case MVT::f32:
    ObjSize = 4;
    break;
  case MVT::f64:
    ObjSize = 8;
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 4)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}
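
// Worked example (illustrative): for a fastcc function taking (i32, i64)
// with MaxNumIntRegs == 2, the i32 claims one register (ObjIntRegs = 1),
// and the i64 then sees only one register left, so it is split as
// register + memory (ObjIntRegs = 1, ObjSize = 4).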

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                               bool isStdCall) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  SmallVector<SDOperand, 8> ArgValues;
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if the first argument is <= 4 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;    // Frame mechanisms handle retaddr slot.
  unsigned NumSRetBytes = 0; // How many bytes of stack are used for the
                             // struct return.
  unsigned NumXMMRegs = 0;   // XMM regs used for parameter passing.
  unsigned NumIntRegs = 0;   // Integer regs used for parameter passing.

  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };
  static const unsigned GPRArgRegs[][3] = {
    { X86::AL,  X86::DL,  X86::CL  },
    { X86::AX,  X86::DX,  X86::CX  },
    { X86::EAX, X86::EDX, X86::ECX }
  };
  static const TargetRegisterClass* GPRClasses[3] = {
    X86::GR8RegisterClass, X86::GR16RegisterClass, X86::GR32RegisterClass
  };

  // Handle the regparm attribute.
  SmallVector<bool, 8> ArgInRegs(NumArgs, false);
  SmallVector<bool, 8> SRetArgs(NumArgs, false);
  if (!isVarArg) {
    for (unsigned i = 0; i < NumArgs; ++i) {
      unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3+i))->getValue();
      ArgInRegs[i] = (Flags >> 1) & 1;
      SRetArgs[i]  = (Flags >> 2) & 1;
    }
  }

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjXMMRegs = 0;
    unsigned ObjIntRegs = 0;
    unsigned Reg = 0;
    SDOperand ArgValue;

    HowToPassCallArgument(ObjectVT,
                          ArgInRegs[i],
                          NumIntRegs, NumXMMRegs, 3,
                          ObjSize, ObjIntRegs, ObjXMMRegs);

    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
      case MVT::i16:
      case MVT::i32: {
        unsigned RegToUse = GPRArgRegs[ObjectVT-MVT::i8][NumIntRegs];
        Reg = AddLiveIn(MF, RegToUse, GPRClasses[ObjectVT-MVT::i8]);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        assert(!isStdCall && "Unhandled argument type!");
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    }
    if (ObjSize) {
      // XMM arguments have to be aligned on a 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);

      ArgOffset += ArgIncrement;   // Move on to the next argument.
      if (SRetArgs[i])
        NumSRetBytes += ArgIncrement;
    }

    ArgValues.push_back(ArgValue);
  }

  ArgValues.push_back(Root);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);

  if (isStdCall && !isVarArg) {
    BytesToPopOnReturn  = ArgOffset;    // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = NumSRetBytes; // Callee pops hidden struct pointer.
    BytesCallerReserves = ArgOffset;
  }

  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.
  ReturnAddrIndex = 0;            // No return address slot generated yet.

  MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size());
}
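
// Example (illustrative): for a non-vararg stdcall function whose stack
// arguments occupy 8 bytes, the code above sets BytesToPopOnReturn to 8,
// producing a 'ret 8'. Under the plain C convention the caller cleans up,
// and only a hidden struct-return pointer (4 bytes) may be callee-popped.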

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EAX, X86::EDX, X86::ECX
  };

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  // Keep track of the number of integer regs passed so far.
  unsigned NumIntRegs = 0;
  // Keep track of the number of XMM regs passed so far.
  unsigned NumXMMRegs = 0;
  // How many bytes of stack are used for the struct return.
  unsigned NumSRetBytes = 0;

  // Handle the regparm attribute.
  SmallVector<bool, 8> ArgInRegs(NumOps, false);
  SmallVector<bool, 8> SRetArgs(NumOps, false);
  for (unsigned i = 0; i < NumOps; ++i) {
    unsigned Flags =
      cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue();
    ArgInRegs[i] = (Flags >> 1) & 1;
    SRetArgs[i]  = (Flags >> 2) & 1;
  }

  // Calculate the stack frame size.
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    HowToPassCallArgument(Arg.getValueType(),
                          ArgInRegs[i],
                          NumIntRegs, NumXMMRegs, 3,
                          ObjSize, ObjIntRegs, ObjXMMRegs);
    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    NumIntRegs += ObjIntRegs;
    NumXMMRegs += ObjXMMRegs;
    if (ObjSize) {
      // XMM arguments have to be aligned on a 16-byte boundary.
      if (ObjSize == 16)
        NumBytes = ((NumBytes + 15) / 16) * 16;
      NumBytes += ArgIncrement;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumXMMRegs = 0;
  NumIntRegs = 0;
  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    HowToPassCallArgument(Arg.getValueType(),
                          ArgInRegs[i],
                          NumIntRegs, NumXMMRegs, 3,
                          ObjSize, ObjIntRegs, ObjXMMRegs);

    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    if (Arg.getValueType() == MVT::i8 || Arg.getValueType() == MVT::i16) {
      // Promote the integer to 32 bits. If the input type is signed, use a
      // sign extend; otherwise use a zero extend.
      unsigned Flags = cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue();

      unsigned ExtOp = (Flags & 1) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
    }

    if (ObjIntRegs || ObjXMMRegs) {
      switch (Arg.getValueType()) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i32:
        RegsToPass.push_back(std::make_pair(GPR32ArgRegs[NumIntRegs], Arg));
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        break;
      }

      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    }
    if (ObjSize) {
      // XMM arguments have to be aligned on a 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;

      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));

      ArgOffset += ArgIncrement;   // Move on to the next argument.
      if (SRetArgs[i])
        NumSRetBytes += ArgIncrement;
    }
  }

  // Sanity check: we haven't seen NumSRetBytes > 4.
  assert((NumSRetBytes <= 4) &&
         "Too much space for struct-return pointer requested");

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls via PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is),
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall) {
    if (isVarArg)
      NumBytesForCalleeToPush = NumSRetBytes;
    else
      NumBytesForCalleeToPush = NumBytes;
  } else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = NumSRetBytes;
  }

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
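
// The call sequence built by LowerCCCCallTo above has the usual shape
// (a sketch of the nodes, in emission order):
//   CALLSEQ_START(NumBytes)
//     stores of stack arguments, CopyToReg of register arguments
//   X86ISD::CALL(Callee, argregs..., flag)
//   CALLSEQ_END(NumBytes, NumBytesForCalleeToPush)
//   CopyFromReg of results (LowerCallResult)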

//===----------------------------------------------------------------------===//
//                X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

/// HowToPassX86_64CCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is passed on the stack, returns the size of
/// the stack slot; if it is passed in integer or XMM registers, returns the
/// number of integer or XMM registers needed.
static void
HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
                           unsigned NumIntRegs, unsigned NumXMMRegs,
                           unsigned &ObjSize, unsigned &ObjIntRegs,
                           unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    if (NumIntRegs < 6)
      ObjIntRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::i8:  ObjSize = 1; break;
      case MVT::i16: ObjSize = 2; break;
      case MVT::i32: ObjSize = 4; break;
      case MVT::i64: ObjSize = 8; break;
      }
    }
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 8)
      ObjXMMRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::f32:   ObjSize = 4; break;
      case MVT::f64:   ObjSize = 8; break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: ObjSize = 16; break;
      }
    }
    break;
  }
}
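
// Example (illustrative): for 'void f(int, double, <4 x float>)' on X86-64,
// the int is assigned EDI (the first of six GPRs), the double XMM0 and the
// vector XMM1 (the first two of eight XMM registers); nothing is passed on
// the stack until the 7th integer or 9th XMM argument.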

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SmallVector<SDOperand, 8> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [RSP]     -- return address
  // [RSP + 8] -- first nonreg argument (leftmost lexically)
  // [RSP +16] -- second nonreg argument, if the first argument is <= 8 bytes
  //              in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot.
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 8;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    // FIXME: __int128 and long double support?
    HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
                               ObjSize, ObjIntRegs, ObjXMMRegs);
    if (ObjSize > 8)
      ArgIncrement = ObjSize;

    unsigned Reg = 0;
    SDOperand ArgValue;
    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::i64: {
        TargetRegisterClass *RC = NULL;
        switch (ObjectVT) {
        default: break;
        case MVT::i8:
          RC = X86::GR8RegisterClass;
          Reg = GPR8ArgRegs[NumIntRegs];
          break;
        case MVT::i16:
          RC = X86::GR16RegisterClass;
          Reg = GPR16ArgRegs[NumIntRegs];
          break;
        case MVT::i32:
          RC = X86::GR32RegisterClass;
          Reg = GPR32ArgRegs[NumIntRegs];
          break;
        case MVT::i64:
          RC = X86::GR64RegisterClass;
          Reg = GPR64ArgRegs[NumIntRegs];
          break;
        }
        Reg = AddLiveIn(MF, Reg, RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      case MVT::f32:
      case MVT::f64:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: {
        TargetRegisterClass *RC = (ObjectVT == MVT::f32) ?
          X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
          X86::FR64RegisterClass : X86::VR128RegisterClass);
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    } else if (ObjSize) {
      // XMM arguments have to be aligned on a 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
      ArgOffset += ArgIncrement;   // Move on to the next argument.
    }

    ArgValues.push_back(ArgValue);
  }

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

    // Store the integer parameter registers.
    SmallVector<SDOperand, 8> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset,
                                                getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  ReturnAddrIndex = 0;     // No return address slot generated yet.
  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size());
}
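
// Layout of the X86-64 vararg register save area created above (sketch):
//   bytes   0..47  : RDI, RSI, RDX, RCX, R8, R9   (6 x 8 bytes)
//   bytes  48..175 : XMM0..XMM7                   (8 x 16 bytes)
// VarArgsGPOffset and VarArgsFPOffset record where va_arg should continue
// reading integer and fp arguments within this area.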

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unknown value type!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6)
        ++NumIntRegs;
      else
        NumBytes += 8;
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8)
        NumXMMRegs++;
      else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
        NumBytes += 8;
      else {
        // XMM arguments have to be aligned on a 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumIntRegs = 0;
  NumXMMRegs = 0;
  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6) {
        unsigned Reg = 0;
        switch (ArgVT) {
        default: break;
        case MVT::i8:  Reg = GPR8ArgRegs[NumIntRegs];  break;
        case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
        case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
        case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
        }
        RegsToPass.push_back(std::make_pair(Reg, Arg));
        ++NumIntRegs;
      } else {
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        ArgOffset += 8;
      }
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
          // XMM arguments have to be aligned on a 16-byte boundary.
          ArgOffset = ((ArgOffset + 15) / 16) * 16;
        }
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
          ArgOffset += 8;
        else
          ArgOffset += 16;
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    // From the AMD64 ABI document:
    //   For calls that may call functions that use varargs or stdargs
    //   (prototype-less calls or calls to functions containing ellipsis (...)
    //   in the declaration), %al is used as a hidden argument to specify the
    //   number of SSE registers used. The contents of %al do not need to
    //   match exactly the number of registers, but must be an upper bound on
    //   the number of SSE registers used and is in the range 0 - 8 inclusive.
    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is),
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
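
// Example (illustrative): for a vararg call such as printf("%f\n", x),
// 'x' travels in XMM0, so NumXMMRegs == 1 and AL is set to 1 before the
// call, as the AMD64 ABI requires for varargs callees.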

SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG,
                                        bool isFastCall) {
  unsigned NumArgs = Op.Val->getNumValues()-1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  SmallVector<SDOperand, 8> ArgValues;

  // Add DAG nodes to load the arguments...  On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
  //    ...
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither EAX/ECX nor EDX used), 1 (EAX/ECX is used) or 2
  // (EAX/ECX and EDX are both used).
  unsigned NumIntRegs = 0;
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };

  static const unsigned GPRArgRegs[][2][2] = {
    {{ X86::AL,  X86::DL },  { X86::CL,  X86::DL }},
    {{ X86::AX,  X86::DX },  { X86::CX,  X86::DX }},
    {{ X86::EAX, X86::EDX }, { X86::ECX, X86::EDX }}
  };

  static const TargetRegisterClass* GPRClasses[3] = {
    X86::GR8RegisterClass, X86::GR16RegisterClass, X86::GR32RegisterClass
  };

  unsigned GPRInd = (isFastCall ? 1 : 0);
  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjXMMRegs = 0;
    unsigned ObjIntRegs = 0;
    unsigned Reg = 0;
    SDOperand ArgValue;

    HowToPassCallArgument(ObjectVT,
                          true, // Use as many registers as possible
                          NumIntRegs, NumXMMRegs,
                          (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS),
                          ObjSize, ObjIntRegs, ObjXMMRegs);

    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
      case MVT::i16:
      case MVT::i32: {
        unsigned RegToUse = GPRArgRegs[ObjectVT-MVT::i8][GPRInd][NumIntRegs];
        Reg = AddLiveIn(MF, RegToUse, GPRClasses[ObjectVT-MVT::i8]);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: {
        assert(!isFastCall && "Unhandled argument type!");
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    }
    if (ObjSize) {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);

      ArgOffset += ArgIncrement;   // Move on to the next argument.
    }

    ArgValues.push_back(ArgValue);
  }

  ArgValues.push_back(Root);

  // Make sure the instruction takes 8n+4 bytes, so that the argument area
  // stays aligned both before and after the return address has been pushed.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(MF.getFunction()->getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    assert(!isFastCall && "Unknown result type");
    MF.addLiveOut(X86::XMM0);
    break;
  }

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size());
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither EAX/ECX nor EDX used), 1 (EAX/ECX is used) or 2
  // (EAX/ECX and EDX are both used).
  unsigned NumIntRegs = 0;
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned GPRArgRegs[][2][2] = {
    {{ X86::AL,  X86::DL },  { X86::CL,  X86::DL }},
    {{ X86::AX,  X86::DX },  { X86::CX,  X86::DX }},
    {{ X86::EAX, X86::EDX }, { X86::ECX, X86::EDX }}
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };

  bool isFastCall = CC == CallingConv::X86_FastCall;
  unsigned GPRInd = isFastCall ? 1 : 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unknown value type!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32: {
      unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
      if (NumIntRegs < MaxNumIntRegs) {
        ++NumIntRegs;
        break;
      }
    } // Fall through
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::f64:
      NumBytes += 8;
      break;
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      assert(!isFastCall && "Unknown value type!");
      if (NumXMMRegs < 4)
        NumXMMRegs++;
      else {
        // XMM arguments have to be aligned on 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  // Make sure the instruction takes 8n+4 bytes, so that the argument area
  // stays aligned both before and after the return address has been pushed.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumIntRegs = 0;
  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32: {
      unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
      if (NumIntRegs < MaxNumIntRegs) {
        unsigned RegToUse =
          GPRArgRegs[Arg.getValueType()-MVT::i8][GPRInd][NumIntRegs];
        RegsToPass.push_back(std::make_pair(RegToUse, Arg));
        ++NumIntRegs;
        break;
      }
    } // Fall through
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      ArgOffset += 4;
      break;
    }
    case MVT::f64: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      ArgOffset += 8;
      break;
    }
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      assert(!isFastCall && "Unexpected ValueType for argument!");
      if (NumXMMRegs < 4) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        // XMM arguments have to be aligned on 16-byte boundary.
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
        ArgOffset += 16;
      }
      break;
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
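  // The Flag results glue each copy to the next node, so the scheduler cannot
  // reorder anything in between that might clobber the argument registers.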
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires the GOT pointer to be live in EBX before calls to
  // functions made through the PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


/// translateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
/// specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode.
/// LHS/RHS are modified as needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    // ZF  PF  CF   op
    //  0 | 0 | 0 | X > Y
    //  0 | 0 | 1 | X < Y
    //  1 | 0 | 0 | X == Y
    //  1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code?  The current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value falls within the specified range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value equals the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
      return false;
  }

  return true;
}

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
      return false;

  return true;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Elems[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedSHUFP - Returns true if the shuffle mask is exactly the reverse
/// of what x86 shuffles want. x86 shuffles require the lower half elements
/// to come from vector 1 (which would equal the dest.)
/// and the upper half to come from vector 2.
static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
  if (NumOps != 2 && NumOps != 4) return false;

  unsigned Half = NumOps / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
      return false;
  for (unsigned i = Half; i < NumOps; ++i)
    if (!isUndefOrInRange(Ops[i], 0, NumOps))
      return false;
  return true;
}

static bool isCommutedSHUFP(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect elt0 == 6, elt1 == 7, elt2 == 2, elt3 == 3
  return isUndefOrEqual(N->getOperand(0), 6) &&
         isUndefOrEqual(N->getOperand(1), 7) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect elt0 == 2, elt1 == 3, elt2 == 2, elt3 == 3
  return isUndefOrEqual(N->getOperand(0), 2) &&
         isUndefOrEqual(N->getOperand(1), 3) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  return true;
}

/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i) {
    SDOperand Arg = N->getOperand(i + NumElems/2);
    if (!isUndefOrEqual(Arg, i + NumElems))
      return false;
  }

  return true;
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
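/// For example, the 4-element form of the mask is <0, 4, 1, 5>: unpckl
/// interleaves the low halves of the two input vectors.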
static bool isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      if (!isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j + NumElts/2))
      return false;
    if (V2IsSplat) {
      if (!isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants. X86 movs{s|d} requires the lowest element to be
/// the lowest element of vector 2 and the other elements to come from
/// vector 1 in order.
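/// For example, the commuted form of a 4-element MOVL mask is <0, 5, 6, 7>.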
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
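  // e.g. <2, 2, 2, 2> (or <2, undef, 2, 2>) is a splat of element 2 of V1.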
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue() - 4;
    Mask |= Val;
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
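  // Pack the four 2-bit element indices into an 8-bit immediate, with
  // element 0 ending up in bits 0-1.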
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the values
/// in their permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
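/// That is, a SCALAR_TO_VECTOR node whose operand is a plain, non-extending
/// load.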
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation; we would rather
  // fold the load into a shufps op.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  SmallVector<SDOperand, 8> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}

/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
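/// e.g. for 4 elements the mask is <4, 1, 2, 3>: element 0 is taken from V2
/// and the remaining elements pass through from V1.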
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);

  SmallVector<SDOperand, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  unsigned Half = NumElems/2;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");
  unsigned NumElems = getVectorNumElements(VT);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  bool isFP = MVT::isFloatingPoint(EVT);
  SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
  SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
  return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
}

/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  Mask = getUnpacklMask(NumElems, DAG);
  while (NumElems != 4) {
    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
    NumElems >>= 1;
  }
  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);

  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
  Mask = getZeroVector(MaskVT, DAG);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
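/// LowerBUILD_VECTOR uses this to spot elements that a zero vector (pxor /
/// xorps) already provides for free.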
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
  SDOperand Zero = DAG.getConstant(0, EVT);
  SmallVector<SDOperand, 8> MaskVec(NumElems, Zero);
  MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getConstant(i/2, TLI.getPointerTy()));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getConstant(i, TLI.getPointerTy()));
    }
  }

  return V;
}

SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor.
  if (ISD::isBuildVectorAllZeros(Op.Val))
    return Op;

  // All ones are handled with pcmpeqd.
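  // (pxor zeroes a register and pcmpeqd of a register with itself sets every
  // bit, so neither constant requires a load from memory.)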
  if (ISD::isBuildVectorAllOnes(Op.Val))
    return Op;

  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  unsigned EVTBits = MVT::getSizeInBits(EVT);

  unsigned NumElems = Op.getNumOperands();
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  std::set<SDOperand> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::UNDEF) {
      Values.insert(Elt);
      if (isZeroNode(Elt))
        NumZero++;
      else {
        NonZeros |= (1 << i);
        NumNonZero++;
      }
    }
  }

  if (NumNonZero == 0)
    // Must be a mix of zero and undef. Return a zero vector.
    return getZeroVector(VT, DAG);

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1)
    return SDOperand();

  // Special case for single non-zero element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDOperand Item = Op.getOperand(Idx);
    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
    if (Idx == 0)
      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
                                         NumZero > 0, DAG);

    if (EVTBits == 32) {
      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
                                         DAG);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
                         DAG.getNode(ISD::UNDEF, VT), Mask);
    }
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64)
    return SDOperand();

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
                                        DAG, *this);
    if (V.Val) return V;
  }

  if (EVTBits == 16) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
                                        DAG, *this);
    if (V.Val) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDOperand, 8> V;
  V.resize(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
      default: break;
      case 0:
        V[i] = V[i*2];  // Must be a zero vector.
        break;
      case 1:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                           getMOVLMask(NumElems, DAG));
        break;
      case 2:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                           getMOVLMask(NumElems, DAG));
        break;
      case 3:
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                           getUnpacklMask(NumElems, DAG));
        break;
      }
    }

    // Take advantage of the fact that GR32 to VR128 scalar_to_vector (i.e.
    // movd) clears the upper bits.
    // FIXME: we can do the same for v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in post legalizer dag combiner with target specific hooks.
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    SmallVector<SDOperand, 8> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, MaskEVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size());
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }

  if (Values.size() > 2) {
    // Expand into a number of unpckl*.
    // e.g. for v4f32
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;

  if (isUndefShuffle(Op.Val))
    return DAG.getNode(ISD::UNDEF, VT);

  if (isSplatMask(PermMask.Val)) {
    if (NumElems <= 4) return Op;
    // Promote it to a v4i32 splat.
    return PromoteSplat(Op, DAG);
  }

  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  bool Commuted = false;
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val))
      return Op;

    // Handle v8i16 shuffle high / low shuffle node pair.
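    // First shuffle the low-quadword elements into place, passing the high
    // quadword through unchanged, then fix up the high quadword.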
    if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
    }
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }
  }

  if (NumElems == 4) {
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    SmallVector<std::pair<int, int>, 8> Locs;
    Locs.resize(NumElems);
    SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand, 8> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles: the first gathers the elements, and
    // the second, which takes the first shuffle as both of its vector
    // operands, puts the elements into the right order.
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &Mask1[0], Mask1.size()));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &Mask2[0], Mask2.size()));
    }

    // Break it into (shuffle shuffle_hi, shuffle_lo).
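    // Gather the elements destined for each half with two independent
    // shuffles of V1/V2, then merge the two partial results with a final
    // shuffle.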
3196 Locs.clear(); 3197 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3198 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3199 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3200 unsigned MaskIdx = 0; 3201 unsigned LoIdx = 0; 3202 unsigned HiIdx = NumElems/2; 3203 for (unsigned i = 0; i != NumElems; ++i) { 3204 if (i == NumElems/2) { 3205 MaskPtr = &HiMask; 3206 MaskIdx = 1; 3207 LoIdx = 0; 3208 HiIdx = NumElems/2; 3209 } 3210 SDOperand Elt = PermMask.getOperand(i); 3211 if (Elt.getOpcode() == ISD::UNDEF) { 3212 Locs[i] = std::make_pair(-1, -1); 3213 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3214 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3215 (*MaskPtr)[LoIdx] = Elt; 3216 LoIdx++; 3217 } else { 3218 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3219 (*MaskPtr)[HiIdx] = Elt; 3220 HiIdx++; 3221 } 3222 } 3223 3224 SDOperand LoShuffle = 3225 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3226 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3227 &LoMask[0], LoMask.size())); 3228 SDOperand HiShuffle = 3229 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3230 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3231 &HiMask[0], HiMask.size())); 3232 SmallVector<SDOperand, 8> MaskOps; 3233 for (unsigned i = 0; i != NumElems; ++i) { 3234 if (Locs[i].first == -1) { 3235 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3236 } else { 3237 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3238 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3239 } 3240 } 3241 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3242 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3243 &MaskOps[0], MaskOps.size())); 3244 } 3245 3246 return SDOperand(); 3247} 3248 3249SDOperand 3250X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3251 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3252 return SDOperand(); 3253 3254 MVT::ValueType VT = Op.getValueType(); 3255 // TODO: handle v16i8. 3256 if (MVT::getSizeInBits(VT) == 16) { 3257 // Transform it so it matches pextrw, which produces a 32-bit result. 3258 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3259 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3260 Op.getOperand(0), Op.getOperand(1)); 3261 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3262 DAG.getValueType(VT)); 3263 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3264 } else if (MVT::getSizeInBits(VT) == 32) { 3265 SDOperand Vec = Op.getOperand(0); 3266 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3267 if (Idx == 0) 3268 return Op; 3269 // SHUFPS the element to the lowest double word, then movss.
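    // Build the shuffle mask <Idx, undef, undef, undef>; only lane 0 of the
    // shuffled vector is read by the EXTRACT_VECTOR_ELT below, so the other
    // lanes can stay undefined.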
3270 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3271 SmallVector<SDOperand, 8> IdxVec; 3272 IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT))); 3273 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3274 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3275 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3276 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3277 &IdxVec[0], IdxVec.size()); 3278 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3279 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3280 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3281 DAG.getConstant(0, getPointerTy())); 3282 } else if (MVT::getSizeInBits(VT) == 64) { 3283 SDOperand Vec = Op.getOperand(0); 3284 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3285 if (Idx == 0) 3286 return Op; 3287 3288 // UNPCKHPD the element to the lowest double word, then movsd. 3289 // Note that if the lower 64 bits of the result of the UNPCKHPD are then 3290 // stored to an f64mem, the whole operation is folded into a single MOVHPDmr. 3291 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3292 SmallVector<SDOperand, 8> IdxVec; 3293 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT))); 3294 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 3295 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3296 &IdxVec[0], IdxVec.size()); 3297 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3298 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3299 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3300 DAG.getConstant(0, getPointerTy())); 3301 } 3302 3303 return SDOperand(); 3304} 3305 3306SDOperand 3307X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3308 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32 3309 // as its second argument. 3310 MVT::ValueType VT = Op.getValueType(); 3311 MVT::ValueType BaseVT = MVT::getVectorBaseType(VT); 3312 SDOperand N0 = Op.getOperand(0); 3313 SDOperand N1 = Op.getOperand(1); 3314 SDOperand N2 = Op.getOperand(2); 3315 if (MVT::getSizeInBits(BaseVT) == 16) { 3316 if (N1.getValueType() != MVT::i32) 3317 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3318 if (N2.getValueType() != MVT::i32) 3319 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32); 3320 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3321 } else if (MVT::getSizeInBits(BaseVT) == 32) { 3322 unsigned Idx = cast<ConstantSDNode>(N2)->getValue(); 3323 if (Idx == 0) { 3324 // Use a movss. 3325 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1); 3326 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3327 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 3328 SmallVector<SDOperand, 8> MaskVec; 3329 MaskVec.push_back(DAG.getConstant(4, BaseVT)); 3330 for (unsigned i = 1; i <= 3; ++i) 3331 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3332 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1, 3333 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3334 &MaskVec[0], MaskVec.size())); 3335 } else { 3336 // Use two pinsrw instructions to insert a 32-bit value. 3337 Idx <<= 1; 3338 if (MVT::isFloatingPoint(N1.getValueType())) { 3339 if (ISD::isNON_EXTLoad(N1.Val)) { 3340 // Just load directly from f32mem to GR32.
3341 LoadSDNode *LD = cast<LoadSDNode>(N1); 3342 N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(), 3343 LD->getSrcValue(), LD->getSrcValueOffset()); 3344 } else { 3345 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1); 3346 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1); 3347 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1, 3348 DAG.getConstant(0, getPointerTy())); 3349 } 3350 } 3351 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0); 3352 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3353 DAG.getConstant(Idx, getPointerTy())); 3354 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8)); 3355 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3356 DAG.getConstant(Idx+1, getPointerTy())); 3357 return DAG.getNode(ISD::BIT_CONVERT, VT, N0); 3358 } 3359 } 3360 3361 return SDOperand(); 3362} 3363 3364SDOperand 3365X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3366 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3367 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3368} 3369 3370// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3371// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is 3372// one of the above-mentioned nodes. It has to be wrapped because otherwise 3373// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3374// be used to form an addressing mode. These wrapped nodes will be selected 3375// into MOV32ri. 3376SDOperand 3377X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3378 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3379 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3380 getPointerTy(), 3381 CP->getAlignment()); 3382 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3383 // With PIC, the address is actually $g + Offset. 3384 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3385 !Subtarget->isPICStyleRIPRel()) { 3386 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3387 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3388 Result); 3389 } 3390 3391 return Result; 3392} 3393 3394SDOperand 3395X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3396 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3397 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3398 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3399 // With PIC, the address is actually $g + Offset. 3400 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3401 !Subtarget->isPICStyleRIPRel()) { 3402 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3403 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3404 Result); 3405 } 3406 3407 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3408 // load the value at address GV, not the value of GV itself. This means that 3409 // the GlobalAddress must be in the base or index register of the address, not 3410 // the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 3411 // The same applies for external symbols during PIC codegen 3412 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3413 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 3414 3415 return Result; 3416} 3417 3418SDOperand 3419X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3420 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3421 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3422 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3423 // With PIC, the address is actually $g + Offset. 3424 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3425 !Subtarget->isPICStyleRIPRel()) { 3426 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3427 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3428 Result); 3429 } 3430 3431 return Result; 3432} 3433 3434SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 3435 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3436 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 3437 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3438 // With PIC, the address is actually $g + Offset. 3439 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3440 !Subtarget->isPICStyleRIPRel()) { 3441 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3442 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3443 Result); 3444 } 3445 3446 return Result; 3447} 3448 3449SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3450 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3451 "Not an i64 shift!"); 3452 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3453 SDOperand ShOpLo = Op.getOperand(0); 3454 SDOperand ShOpHi = Op.getOperand(1); 3455 SDOperand ShAmt = Op.getOperand(2); 3456 SDOperand Tmp1 = isSRA ? 3457 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3458 DAG.getConstant(0, MVT::i32); 3459 3460 SDOperand Tmp2, Tmp3; 3461 if (Op.getOpcode() == ISD::SHL_PARTS) { 3462 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3463 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3464 } else { 3465 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3466 Tmp3 = DAG.getNode(isSRA ? 
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3467 } 3468 3469 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3470 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3471 DAG.getConstant(32, MVT::i8)); 3472 SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)}; 3473 SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1); 3474 3475 SDOperand Hi, Lo; 3476 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3477 3478 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3479 SmallVector<SDOperand, 4> Ops; 3480 if (Op.getOpcode() == ISD::SHL_PARTS) { 3481 Ops.push_back(Tmp2); 3482 Ops.push_back(Tmp3); 3483 Ops.push_back(CC); 3484 Ops.push_back(InFlag); 3485 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3486 InFlag = Hi.getValue(1); 3487 3488 Ops.clear(); 3489 Ops.push_back(Tmp3); 3490 Ops.push_back(Tmp1); 3491 Ops.push_back(CC); 3492 Ops.push_back(InFlag); 3493 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3494 } else { 3495 Ops.push_back(Tmp2); 3496 Ops.push_back(Tmp3); 3497 Ops.push_back(CC); 3498 Ops.push_back(InFlag); 3499 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3500 InFlag = Lo.getValue(1); 3501 3502 Ops.clear(); 3503 Ops.push_back(Tmp3); 3504 Ops.push_back(Tmp1); 3505 Ops.push_back(CC); 3506 Ops.push_back(InFlag); 3507 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3508 } 3509 3510 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3511 Ops.clear(); 3512 Ops.push_back(Lo); 3513 Ops.push_back(Hi); 3514 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3515} 3516 3517SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3518 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3519 Op.getOperand(0).getValueType() >= MVT::i16 && 3520 "Unknown SINT_TO_FP to lower!"); 3521 3522 SDOperand Result; 3523 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3524 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3525 MachineFunction &MF = DAG.getMachineFunction(); 3526 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3527 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3528 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3529 StackSlot, NULL, 0); 3530 3531 // Build the FILD 3532 SDVTList Tys; 3533 if (X86ScalarSSE) 3534 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 3535 else 3536 Tys = DAG.getVTList(MVT::f64, MVT::Other); 3537 SmallVector<SDOperand, 8> Ops; 3538 Ops.push_back(Chain); 3539 Ops.push_back(StackSlot); 3540 Ops.push_back(DAG.getValueType(SrcVT)); 3541 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 3542 Tys, &Ops[0], Ops.size()); 3543 3544 if (X86ScalarSSE) { 3545 Chain = Result.getValue(1); 3546 SDOperand InFlag = Result.getValue(2); 3547 3548 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 3549 // shouldn't be necessary except that RFP cannot be live across 3550 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
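    // Store the x87 FILD result to a fresh stack slot and reload it, so the
    // value ends up in an SSE register of the requested type rather than
    // staying in an RFP register.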
3551 MachineFunction &MF = DAG.getMachineFunction(); 3552 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 3553 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3554 Tys = DAG.getVTList(MVT::Other); 3555 SmallVector<SDOperand, 8> Ops; 3556 Ops.push_back(Chain); 3557 Ops.push_back(Result); 3558 Ops.push_back(StackSlot); 3559 Ops.push_back(DAG.getValueType(Op.getValueType())); 3560 Ops.push_back(InFlag); 3561 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 3562 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 3563 } 3564 3565 return Result; 3566} 3567 3568SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 3569 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 3570 "Unknown FP_TO_SINT to lower!"); 3571 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 3572 // stack slot. 3573 MachineFunction &MF = DAG.getMachineFunction(); 3574 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 3575 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3576 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3577 3578 unsigned Opc; 3579 switch (Op.getValueType()) { 3580 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 3581 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 3582 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 3583 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 3584 } 3585 3586 SDOperand Chain = DAG.getEntryNode(); 3587 SDOperand Value = Op.getOperand(0); 3588 if (X86ScalarSSE) { 3589 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 3590 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 3591 SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other); 3592 SDOperand Ops[] = { 3593 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 3594 }; 3595 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 3596 Chain = Value.getValue(1); 3597 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3598 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3599 } 3600 3601 // Build the FP_TO_INT*_IN_MEM 3602 SDOperand Ops[] = { Chain, Value, StackSlot }; 3603 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 3604 3605 // Load the result. 
3606 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 3607} 3608 3609SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 3610 MVT::ValueType VT = Op.getValueType(); 3611 const Type *OpNTy = MVT::getTypeForValueType(VT); 3612 std::vector<Constant*> CV; 3613 if (VT == MVT::f64) { 3614 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)))); 3615 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3616 } else { 3617 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)))); 3618 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3619 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3620 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3621 } 3622 Constant *CS = ConstantStruct::get(CV); 3623 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3624 SDVTList Tys = DAG.getVTList(VT, MVT::Other); 3625 SmallVector<SDOperand, 3> Ops; 3626 Ops.push_back(DAG.getEntryNode()); 3627 Ops.push_back(CPIdx); 3628 Ops.push_back(DAG.getSrcValue(NULL)); 3629 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3630 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 3631} 3632 3633SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 3634 MVT::ValueType VT = Op.getValueType(); 3635 const Type *OpNTy = MVT::getTypeForValueType(VT); 3636 std::vector<Constant*> CV; 3637 if (VT == MVT::f64) { 3638 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63))); 3639 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3640 } else { 3641 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31))); 3642 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3643 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3644 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3645 } 3646 Constant *CS = ConstantStruct::get(CV); 3647 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3648 SDVTList Tys = DAG.getVTList(VT, MVT::Other); 3649 SmallVector<SDOperand, 3> Ops; 3650 Ops.push_back(DAG.getEntryNode()); 3651 Ops.push_back(CPIdx); 3652 Ops.push_back(DAG.getSrcValue(NULL)); 3653 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3654 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 3655} 3656 3657SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 3658 SDOperand Op0 = Op.getOperand(0); 3659 SDOperand Op1 = Op.getOperand(1); 3660 MVT::ValueType VT = Op.getValueType(); 3661 MVT::ValueType SrcVT = Op1.getValueType(); 3662 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 3663 3664 // If second operand is smaller, extend it first. 3665 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 3666 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 3667 SrcVT = VT; 3668 } 3669 3670 // First get the sign bit of second operand. 
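  // The sign bit is isolated by ANDing with a constant-pool value whose first
  // element has only the sign bit set (bit 63 for f64, bit 31 for f32) and
  // whose remaining elements are zero.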
3671 std::vector<Constant*> CV; 3672 if (SrcVT == MVT::f64) { 3673 CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(1ULL << 63))); 3674 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3675 } else { 3676 CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(1U << 31))); 3677 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3678 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3679 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3680 } 3681 Constant *CS = ConstantStruct::get(CV); 3682 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3683 SDVTList Tys = DAG.getVTList(SrcVT, MVT::Other); 3684 SmallVector<SDOperand, 3> Ops; 3685 Ops.push_back(DAG.getEntryNode()); 3686 Ops.push_back(CPIdx); 3687 Ops.push_back(DAG.getSrcValue(NULL)); 3688 SDOperand Mask1 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3689 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 3690 3691 // Shift sign bit right or left if the two operands have different types. 3692 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 3693 // Op0 is MVT::f32, Op1 is MVT::f64. 3694 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 3695 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 3696 DAG.getConstant(32, MVT::i32)); 3697 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 3698 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 3699 DAG.getConstant(0, getPointerTy())); 3700 } 3701 3702 // Clear first operand sign bit. 3703 CV.clear(); 3704 if (VT == MVT::f64) { 3705 CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(~(1ULL << 63)))); 3706 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3707 } else { 3708 CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(~(1U << 31)))); 3709 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3710 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3711 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3712 } 3713 CS = ConstantStruct::get(CV); 3714 CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3715 Tys = DAG.getVTList(VT, MVT::Other); 3716 Ops.clear(); 3717 Ops.push_back(DAG.getEntryNode()); 3718 Ops.push_back(CPIdx); 3719 Ops.push_back(DAG.getSrcValue(NULL)); 3720 SDOperand Mask2 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3721 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 3722 3723 // Or the value with the sign bit. 
3724 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 3725} 3726 3727SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG, 3728 SDOperand Chain) { 3729 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 3730 SDOperand Cond; 3731 SDOperand Op0 = Op.getOperand(0); 3732 SDOperand Op1 = Op.getOperand(1); 3733 SDOperand CC = Op.getOperand(2); 3734 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3735 const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3736 const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 3737 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 3738 unsigned X86CC; 3739 3740 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 3741 Op0, Op1, DAG)) { 3742 SDOperand Ops1[] = { Chain, Op0, Op1 }; 3743 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1); 3744 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 3745 return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3746 } 3747 3748 assert(isFP && "Illegal integer SetCC!"); 3749 3750 SDOperand COps[] = { Chain, Op0, Op1 }; 3751 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1); 3752 3753 switch (SetCCOpcode) { 3754 default: assert(false && "Illegal floating point SetCC!"); 3755 case ISD::SETOEQ: { // !PF & ZF 3756 SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond }; 3757 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 3758 SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8), 3759 Tmp1.getValue(1) }; 3760 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3761 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 3762 } 3763 case ISD::SETUNE: { // PF | !ZF 3764 SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond }; 3765 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 3766 SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8), 3767 Tmp1.getValue(1) }; 3768 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3769 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 3770 } 3771 } 3772} 3773 3774SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 3775 bool addTest = true; 3776 SDOperand Chain = DAG.getEntryNode(); 3777 SDOperand Cond = Op.getOperand(0); 3778 SDOperand CC; 3779 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3780 3781 if (Cond.getOpcode() == ISD::SETCC) 3782 Cond = LowerSETCC(Cond, DAG, Chain); 3783 3784 if (Cond.getOpcode() == X86ISD::SETCC) { 3785 CC = Cond.getOperand(0); 3786 3787 // If condition flag is set by a X86ISD::CMP, then make a copy of it 3788 // (since flag operand cannot be shared). Use it as the condition setting 3789 // operand in place of the X86ISD::SETCC. 3790 // If the X86ISD::SETCC has more than one use, then perhaps it's better 3791 // to use a test instead of duplicating the X86ISD::CMP (for register 3792 // pressure reason)? 
3793 SDOperand Cmp = Cond.getOperand(1); 3794 unsigned Opc = Cmp.getOpcode(); 3795 bool IllegalFPCMov = !X86ScalarSSE && 3796 MVT::isFloatingPoint(Op.getValueType()) && 3797 !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 3798 if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) && 3799 !IllegalFPCMov) { 3800 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 3801 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 3802 addTest = false; 3803 } 3804 } 3805 3806 if (addTest) { 3807 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3808 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 3809 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 3810 } 3811 3812 VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); 3813 SmallVector<SDOperand, 4> Ops; 3814 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 3815 // condition is true. 3816 Ops.push_back(Op.getOperand(2)); 3817 Ops.push_back(Op.getOperand(1)); 3818 Ops.push_back(CC); 3819 Ops.push_back(Cond.getValue(1)); 3820 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3821} 3822 3823SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 3824 bool addTest = true; 3825 SDOperand Chain = Op.getOperand(0); 3826 SDOperand Cond = Op.getOperand(1); 3827 SDOperand Dest = Op.getOperand(2); 3828 SDOperand CC; 3829 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3830 3831 if (Cond.getOpcode() == ISD::SETCC) 3832 Cond = LowerSETCC(Cond, DAG, Chain); 3833 3834 if (Cond.getOpcode() == X86ISD::SETCC) { 3835 CC = Cond.getOperand(0); 3836 3837 // If condition flag is set by a X86ISD::CMP, then make a copy of it 3838 // (since flag operand cannot be shared). Use it as the condition setting 3839 // operand in place of the X86ISD::SETCC. 3840 // If the X86ISD::SETCC has more than one use, then perhaps it's better 3841 // to use a test instead of duplicating the X86ISD::CMP (for register 3842 // pressure reason)? 
3843 SDOperand Cmp = Cond.getOperand(1); 3844 unsigned Opc = Cmp.getOpcode(); 3845 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) { 3846 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 3847 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 3848 addTest = false; 3849 } 3850 } 3851 3852 if (addTest) { 3853 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3854 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 3855 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 3856 } 3857 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 3858 Cond, Op.getOperand(2), CC, Cond.getValue(1)); 3859} 3860 3861SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 3862 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3863 3864 if (Subtarget->is64Bit()) 3865 return LowerX86_64CCCCallTo(Op, DAG, CallingConv); 3866 else 3867 switch (CallingConv) { 3868 default: 3869 assert(0 && "Unsupported calling convention"); 3870 case CallingConv::Fast: 3871 if (EnableFastCC) 3872 return LowerFastCCCallTo(Op, DAG, CallingConv); 3873 // Falls through 3874 case CallingConv::C: 3875 case CallingConv::X86_StdCall: 3876 return LowerCCCCallTo(Op, DAG, CallingConv); 3877 case CallingConv::X86_FastCall: 3878 return LowerFastCCCallTo(Op, DAG, CallingConv); 3879 } 3880} 3881 3882SDOperand 3883X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 3884 MachineFunction &MF = DAG.getMachineFunction(); 3885 const Function* Fn = MF.getFunction(); 3886 if (Fn->hasExternalLinkage() && 3887 Subtarget->isTargetCygMing() && 3888 Fn->getName() == "main") 3889 MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true); 3890 3891 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3892 if (Subtarget->is64Bit()) 3893 return LowerX86_64CCCArguments(Op, DAG); 3894 else 3895 switch(CC) { 3896 default: 3897 assert(0 && "Unsupported calling convention"); 3898 case CallingConv::Fast: 3899 if (EnableFastCC) { 3900 return LowerFastCCArguments(Op, DAG); 3901 } 3902 // Falls through 3903 case CallingConv::C: 3904 return LowerCCCArguments(Op, DAG); 3905 case CallingConv::X86_StdCall: 3906 MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall); 3907 return LowerCCCArguments(Op, DAG, true); 3908 case CallingConv::X86_FastCall: 3909 MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall); 3910 return LowerFastCCArguments(Op, DAG, true); 3911 } 3912} 3913 3914SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 3915 SDOperand InFlag(0, 0); 3916 SDOperand Chain = Op.getOperand(0); 3917 unsigned Align = 3918 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 3919 if (Align == 0) Align = 1; 3920 3921 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 3922 // If not DWORD aligned, call memset if size is less than the threshold. 3923 // It knows how to align to the right boundary first. 3924 if ((Align & 3) != 0 || 3925 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 3926 MVT::ValueType IntPtr = getPointerTy(); 3927 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 3928 TargetLowering::ArgListTy Args; 3929 TargetLowering::ArgListEntry Entry; 3930 Entry.Node = Op.getOperand(1); 3931 Entry.Ty = IntPtrTy; 3932 Entry.isSigned = false; 3933 Entry.isInReg = false; 3934 Entry.isSRet = false; 3935 Args.push_back(Entry); 3936 // Extend the unsigned i8 argument to be an int value for the call. 
3937 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 3938 Entry.Ty = IntPtrTy; 3939 Entry.isSigned = false; 3940 Entry.isInReg = false; 3941 Entry.isSRet = false; 3942 Args.push_back(Entry); 3943 Entry.Node = Op.getOperand(3); 3944 Args.push_back(Entry); 3945 std::pair<SDOperand,SDOperand> CallResult = 3946 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 3947 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 3948 return CallResult.second; 3949 } 3950 3951 MVT::ValueType AVT; 3952 SDOperand Count; 3953 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3954 unsigned BytesLeft = 0; 3955 bool TwoRepStos = false; 3956 if (ValC) { 3957 unsigned ValReg; 3958 uint64_t Val = ValC->getValue() & 255; 3959 3960 // If the value is a constant, then we can potentially use larger sets. 3961 switch (Align & 3) { 3962 case 2: // WORD aligned 3963 AVT = MVT::i16; 3964 ValReg = X86::AX; 3965 Val = (Val << 8) | Val; 3966 break; 3967 case 0: // DWORD aligned 3968 AVT = MVT::i32; 3969 ValReg = X86::EAX; 3970 Val = (Val << 8) | Val; 3971 Val = (Val << 16) | Val; 3972 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 3973 AVT = MVT::i64; 3974 ValReg = X86::RAX; 3975 Val = (Val << 32) | Val; 3976 } 3977 break; 3978 default: // Byte aligned 3979 AVT = MVT::i8; 3980 ValReg = X86::AL; 3981 Count = Op.getOperand(3); 3982 break; 3983 } 3984 3985 if (AVT > MVT::i8) { 3986 if (I) { 3987 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 3988 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 3989 BytesLeft = I->getValue() % UBytes; 3990 } else { 3991 assert(AVT >= MVT::i32 && 3992 "Do not use rep;stos if not at least DWORD aligned"); 3993 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 3994 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 3995 TwoRepStos = true; 3996 } 3997 } 3998 3999 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4000 InFlag); 4001 InFlag = Chain.getValue(1); 4002 } else { 4003 AVT = MVT::i8; 4004 Count = Op.getOperand(3); 4005 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4006 InFlag = Chain.getValue(1); 4007 } 4008 4009 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4010 Count, InFlag); 4011 InFlag = Chain.getValue(1); 4012 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4013 Op.getOperand(1), InFlag); 4014 InFlag = Chain.getValue(1); 4015 4016 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4017 SmallVector<SDOperand, 8> Ops; 4018 Ops.push_back(Chain); 4019 Ops.push_back(DAG.getValueType(AVT)); 4020 Ops.push_back(InFlag); 4021 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4022 4023 if (TwoRepStos) { 4024 InFlag = Chain.getValue(1); 4025 Count = Op.getOperand(3); 4026 MVT::ValueType CVT = Count.getValueType(); 4027 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4028 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4029 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4030 Left, InFlag); 4031 InFlag = Chain.getValue(1); 4032 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4033 Ops.clear(); 4034 Ops.push_back(Chain); 4035 Ops.push_back(DAG.getValueType(MVT::i8)); 4036 Ops.push_back(InFlag); 4037 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4038 } else if (BytesLeft) { 4039 // Issue stores for the last 1 - 7 bytes. 
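    // The remainder not covered by rep;stos is written with at most one
    // 4-byte, one 2-byte, and one 1-byte store, replicating the byte value to
    // the wider store widths.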
4040 SDOperand Value; 4041 unsigned Val = ValC->getValue() & 255; 4042 unsigned Offset = I->getValue() - BytesLeft; 4043 SDOperand DstAddr = Op.getOperand(1); 4044 MVT::ValueType AddrVT = DstAddr.getValueType(); 4045 if (BytesLeft >= 4) { 4046 Val = (Val << 8) | Val; 4047 Val = (Val << 16) | Val; 4048 Value = DAG.getConstant(Val, MVT::i32); 4049 Chain = DAG.getStore(Chain, Value, 4050 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4051 DAG.getConstant(Offset, AddrVT)), 4052 NULL, 0); 4053 BytesLeft -= 4; 4054 Offset += 4; 4055 } 4056 if (BytesLeft >= 2) { 4057 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4058 Chain = DAG.getStore(Chain, Value, 4059 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4060 DAG.getConstant(Offset, AddrVT)), 4061 NULL, 0); 4062 BytesLeft -= 2; 4063 Offset += 2; 4064 } 4065 if (BytesLeft == 1) { 4066 Value = DAG.getConstant(Val, MVT::i8); 4067 Chain = DAG.getStore(Chain, Value, 4068 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4069 DAG.getConstant(Offset, AddrVT)), 4070 NULL, 0); 4071 } 4072 } 4073 4074 return Chain; 4075} 4076 4077SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) { 4078 SDOperand Chain = Op.getOperand(0); 4079 unsigned Align = 4080 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4081 if (Align == 0) Align = 1; 4082 4083 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4084 // If not DWORD aligned, call memcpy if size is less than the threshold. 4085 // It knows how to align to the right boundary first. 4086 if ((Align & 3) != 0 || 4087 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 4088 MVT::ValueType IntPtr = getPointerTy(); 4089 TargetLowering::ArgListTy Args; 4090 TargetLowering::ArgListEntry Entry; 4091 Entry.Ty = getTargetData()->getIntPtrType(); 4092 Entry.isSigned = false; 4093 Entry.isInReg = false; 4094 Entry.isSRet = false; 4095 Entry.Node = Op.getOperand(1); Args.push_back(Entry); 4096 Entry.Node = Op.getOperand(2); Args.push_back(Entry); 4097 Entry.Node = Op.getOperand(3); Args.push_back(Entry); 4098 std::pair<SDOperand,SDOperand> CallResult = 4099 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 4100 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG); 4101 return CallResult.second; 4102 } 4103 4104 MVT::ValueType AVT; 4105 SDOperand Count; 4106 unsigned BytesLeft = 0; 4107 bool TwoRepMovs = false; 4108 switch (Align & 3) { 4109 case 2: // WORD aligned 4110 AVT = MVT::i16; 4111 break; 4112 case 0: // DWORD aligned 4113 AVT = MVT::i32; 4114 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4115 AVT = MVT::i64; 4116 break; 4117 default: // Byte aligned 4118 AVT = MVT::i8; 4119 Count = Op.getOperand(3); 4120 break; 4121 } 4122 4123 if (AVT > MVT::i8) { 4124 if (I) { 4125 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4126 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 4127 BytesLeft = I->getValue() % UBytes; 4128 } else { 4129 assert(AVT >= MVT::i32 && 4130 "Do not use rep;movs if not at least DWORD aligned"); 4131 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4132 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4133 TwoRepMovs = true; 4134 } 4135 } 4136 4137 SDOperand InFlag(0, 0); 4138 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4139 Count, InFlag); 4140 InFlag = Chain.getValue(1); 4141 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? 
X86::RDI : X86::EDI, 4142 Op.getOperand(1), InFlag); 4143 InFlag = Chain.getValue(1); 4144 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4145 Op.getOperand(2), InFlag); 4146 InFlag = Chain.getValue(1); 4147 4148 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4149 SmallVector<SDOperand, 8> Ops; 4150 Ops.push_back(Chain); 4151 Ops.push_back(DAG.getValueType(AVT)); 4152 Ops.push_back(InFlag); 4153 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4154 4155 if (TwoRepMovs) { 4156 InFlag = Chain.getValue(1); 4157 Count = Op.getOperand(3); 4158 MVT::ValueType CVT = Count.getValueType(); 4159 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4160 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4161 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4162 Left, InFlag); 4163 InFlag = Chain.getValue(1); 4164 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4165 Ops.clear(); 4166 Ops.push_back(Chain); 4167 Ops.push_back(DAG.getValueType(MVT::i8)); 4168 Ops.push_back(InFlag); 4169 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4170 } else if (BytesLeft) { 4171 // Issue loads and stores for the last 1 - 7 bytes. 4172 unsigned Offset = I->getValue() - BytesLeft; 4173 SDOperand DstAddr = Op.getOperand(1); 4174 MVT::ValueType DstVT = DstAddr.getValueType(); 4175 SDOperand SrcAddr = Op.getOperand(2); 4176 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4177 SDOperand Value; 4178 if (BytesLeft >= 4) { 4179 Value = DAG.getLoad(MVT::i32, Chain, 4180 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4181 DAG.getConstant(Offset, SrcVT)), 4182 NULL, 0); 4183 Chain = Value.getValue(1); 4184 Chain = DAG.getStore(Chain, Value, 4185 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4186 DAG.getConstant(Offset, DstVT)), 4187 NULL, 0); 4188 BytesLeft -= 4; 4189 Offset += 4; 4190 } 4191 if (BytesLeft >= 2) { 4192 Value = DAG.getLoad(MVT::i16, Chain, 4193 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4194 DAG.getConstant(Offset, SrcVT)), 4195 NULL, 0); 4196 Chain = Value.getValue(1); 4197 Chain = DAG.getStore(Chain, Value, 4198 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4199 DAG.getConstant(Offset, DstVT)), 4200 NULL, 0); 4201 BytesLeft -= 2; 4202 Offset += 2; 4203 } 4204 4205 if (BytesLeft == 1) { 4206 Value = DAG.getLoad(MVT::i8, Chain, 4207 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4208 DAG.getConstant(Offset, SrcVT)), 4209 NULL, 0); 4210 Chain = Value.getValue(1); 4211 Chain = DAG.getStore(Chain, Value, 4212 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4213 DAG.getConstant(Offset, DstVT)), 4214 NULL, 0); 4215 } 4216 } 4217 4218 return Chain; 4219} 4220 4221SDOperand 4222X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) { 4223 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4224 SDOperand TheOp = Op.getOperand(0); 4225 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1); 4226 if (Subtarget->is64Bit()) { 4227 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4228 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX, 4229 MVT::i64, Copy1.getValue(2)); 4230 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2, 4231 DAG.getConstant(32, MVT::i8)); 4232 SDOperand Ops[] = { 4233 DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp), Copy2.getValue(1) 4234 }; 4235 4236 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4237 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); 4238 } 4239 4240 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4241 SDOperand Copy2 = 
DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX, 4242 MVT::i32, Copy1.getValue(2)); 4243 SDOperand Ops[] = { Copy1, Copy2, Copy2.getValue(1) }; 4244 Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4245 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 3); 4246} 4247 4248SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4249 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 4250 4251 if (!Subtarget->is64Bit()) { 4252 // vastart just stores the address of the VarArgsFrameIndex slot into the 4253 // memory location argument. 4254 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4255 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 4256 SV->getOffset()); 4257 } 4258 4259 // __va_list_tag: 4260 // gp_offset (0 - 6 * 8) 4261 // fp_offset (48 - 48 + 8 * 16) 4262 // overflow_arg_area (point to parameters coming in memory). 4263 // reg_save_area 4264 SmallVector<SDOperand, 8> MemOps; 4265 SDOperand FIN = Op.getOperand(1); 4266 // Store gp_offset 4267 SDOperand Store = DAG.getStore(Op.getOperand(0), 4268 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4269 FIN, SV->getValue(), SV->getOffset()); 4270 MemOps.push_back(Store); 4271 4272 // Store fp_offset 4273 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4274 DAG.getConstant(4, getPointerTy())); 4275 Store = DAG.getStore(Op.getOperand(0), 4276 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4277 FIN, SV->getValue(), SV->getOffset()); 4278 MemOps.push_back(Store); 4279 4280 // Store ptr to overflow_arg_area 4281 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4282 DAG.getConstant(4, getPointerTy())); 4283 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4284 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 4285 SV->getOffset()); 4286 MemOps.push_back(Store); 4287 4288 // Store ptr to reg_save_area. 4289 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4290 DAG.getConstant(8, getPointerTy())); 4291 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4292 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 4293 SV->getOffset()); 4294 MemOps.push_back(Store); 4295 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4296} 4297 4298SDOperand 4299X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4300 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4301 switch (IntNo) { 4302 default: return SDOperand(); // Don't custom lower most intrinsics. 4303 // Comparison intrinsics. 
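  // Each SSE/SSE2 comi*/ucomi* intrinsic becomes an X86ISD::COMI or
  // X86ISD::UCOMI compare plus an X86ISD::SETCC that reads the resulting
  // flags; the inner switch below selects the compare opcode and condition
  // code.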
4304 case Intrinsic::x86_sse_comieq_ss: 4305 case Intrinsic::x86_sse_comilt_ss: 4306 case Intrinsic::x86_sse_comile_ss: 4307 case Intrinsic::x86_sse_comigt_ss: 4308 case Intrinsic::x86_sse_comige_ss: 4309 case Intrinsic::x86_sse_comineq_ss: 4310 case Intrinsic::x86_sse_ucomieq_ss: 4311 case Intrinsic::x86_sse_ucomilt_ss: 4312 case Intrinsic::x86_sse_ucomile_ss: 4313 case Intrinsic::x86_sse_ucomigt_ss: 4314 case Intrinsic::x86_sse_ucomige_ss: 4315 case Intrinsic::x86_sse_ucomineq_ss: 4316 case Intrinsic::x86_sse2_comieq_sd: 4317 case Intrinsic::x86_sse2_comilt_sd: 4318 case Intrinsic::x86_sse2_comile_sd: 4319 case Intrinsic::x86_sse2_comigt_sd: 4320 case Intrinsic::x86_sse2_comige_sd: 4321 case Intrinsic::x86_sse2_comineq_sd: 4322 case Intrinsic::x86_sse2_ucomieq_sd: 4323 case Intrinsic::x86_sse2_ucomilt_sd: 4324 case Intrinsic::x86_sse2_ucomile_sd: 4325 case Intrinsic::x86_sse2_ucomigt_sd: 4326 case Intrinsic::x86_sse2_ucomige_sd: 4327 case Intrinsic::x86_sse2_ucomineq_sd: { 4328 unsigned Opc = 0; 4329 ISD::CondCode CC = ISD::SETCC_INVALID; 4330 switch (IntNo) { 4331 default: break; 4332 case Intrinsic::x86_sse_comieq_ss: 4333 case Intrinsic::x86_sse2_comieq_sd: 4334 Opc = X86ISD::COMI; 4335 CC = ISD::SETEQ; 4336 break; 4337 case Intrinsic::x86_sse_comilt_ss: 4338 case Intrinsic::x86_sse2_comilt_sd: 4339 Opc = X86ISD::COMI; 4340 CC = ISD::SETLT; 4341 break; 4342 case Intrinsic::x86_sse_comile_ss: 4343 case Intrinsic::x86_sse2_comile_sd: 4344 Opc = X86ISD::COMI; 4345 CC = ISD::SETLE; 4346 break; 4347 case Intrinsic::x86_sse_comigt_ss: 4348 case Intrinsic::x86_sse2_comigt_sd: 4349 Opc = X86ISD::COMI; 4350 CC = ISD::SETGT; 4351 break; 4352 case Intrinsic::x86_sse_comige_ss: 4353 case Intrinsic::x86_sse2_comige_sd: 4354 Opc = X86ISD::COMI; 4355 CC = ISD::SETGE; 4356 break; 4357 case Intrinsic::x86_sse_comineq_ss: 4358 case Intrinsic::x86_sse2_comineq_sd: 4359 Opc = X86ISD::COMI; 4360 CC = ISD::SETNE; 4361 break; 4362 case Intrinsic::x86_sse_ucomieq_ss: 4363 case Intrinsic::x86_sse2_ucomieq_sd: 4364 Opc = X86ISD::UCOMI; 4365 CC = ISD::SETEQ; 4366 break; 4367 case Intrinsic::x86_sse_ucomilt_ss: 4368 case Intrinsic::x86_sse2_ucomilt_sd: 4369 Opc = X86ISD::UCOMI; 4370 CC = ISD::SETLT; 4371 break; 4372 case Intrinsic::x86_sse_ucomile_ss: 4373 case Intrinsic::x86_sse2_ucomile_sd: 4374 Opc = X86ISD::UCOMI; 4375 CC = ISD::SETLE; 4376 break; 4377 case Intrinsic::x86_sse_ucomigt_ss: 4378 case Intrinsic::x86_sse2_ucomigt_sd: 4379 Opc = X86ISD::UCOMI; 4380 CC = ISD::SETGT; 4381 break; 4382 case Intrinsic::x86_sse_ucomige_ss: 4383 case Intrinsic::x86_sse2_ucomige_sd: 4384 Opc = X86ISD::UCOMI; 4385 CC = ISD::SETGE; 4386 break; 4387 case Intrinsic::x86_sse_ucomineq_ss: 4388 case Intrinsic::x86_sse2_ucomineq_sd: 4389 Opc = X86ISD::UCOMI; 4390 CC = ISD::SETNE; 4391 break; 4392 } 4393 4394 unsigned X86CC; 4395 SDOperand LHS = Op.getOperand(1); 4396 SDOperand RHS = Op.getOperand(2); 4397 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4398 4399 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4400 SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS }; 4401 SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3); 4402 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 4403 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 4404 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2); 4405 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4406 } 4407 } 4408} 4409 4410SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4411 // Depths > 0 not supported 
yet! 4412 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4413 return SDOperand(); 4414 4415 // Just load the return address 4416 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4417 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4418} 4419 4420SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4421 // Depths > 0 not supported yet! 4422 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4423 return SDOperand(); 4424 4425 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4426 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4427 DAG.getConstant(4, getPointerTy())); 4428} 4429 4430/// LowerOperation - Provide custom lowering hooks for some operations. 4431/// 4432SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4433 switch (Op.getOpcode()) { 4434 default: assert(0 && "Should not custom lower this!"); 4435 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4436 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4437 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4438 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4439 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4440 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4441 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4442 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4443 case ISD::SHL_PARTS: 4444 case ISD::SRA_PARTS: 4445 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4446 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4447 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4448 case ISD::FABS: return LowerFABS(Op, DAG); 4449 case ISD::FNEG: return LowerFNEG(Op, DAG); 4450 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4451 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode()); 4452 case ISD::SELECT: return LowerSELECT(Op, DAG); 4453 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4454 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4455 case ISD::CALL: return LowerCALL(Op, DAG); 4456 case ISD::RET: return LowerRET(Op, DAG); 4457 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4458 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4459 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4460 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4461 case ISD::VASTART: return LowerVASTART(Op, DAG); 4462 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4463 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4464 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4465 } 4466 return SDOperand(); 4467} 4468 4469const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4470 switch (Opcode) { 4471 default: return NULL; 4472 case X86ISD::SHLD: return "X86ISD::SHLD"; 4473 case X86ISD::SHRD: return "X86ISD::SHRD"; 4474 case X86ISD::FAND: return "X86ISD::FAND"; 4475 case X86ISD::FOR: return "X86ISD::FOR"; 4476 case X86ISD::FXOR: return "X86ISD::FXOR"; 4477 case X86ISD::FSRL: return "X86ISD::FSRL"; 4478 case X86ISD::FILD: return "X86ISD::FILD"; 4479 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 4480 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 4481 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 4482 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 4483 case X86ISD::FLD: return "X86ISD::FLD"; 4484 case X86ISD::FST: return 
"X86ISD::FST"; 4485 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 4486 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 4487 case X86ISD::CALL: return "X86ISD::CALL"; 4488 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 4489 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 4490 case X86ISD::CMP: return "X86ISD::CMP"; 4491 case X86ISD::COMI: return "X86ISD::COMI"; 4492 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 4493 case X86ISD::SETCC: return "X86ISD::SETCC"; 4494 case X86ISD::CMOV: return "X86ISD::CMOV"; 4495 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 4496 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 4497 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 4498 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 4499 case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK"; 4500 case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA"; 4501 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 4502 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 4503 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 4504 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 4505 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 4506 case X86ISD::FMAX: return "X86ISD::FMAX"; 4507 case X86ISD::FMIN: return "X86ISD::FMIN"; 4508 } 4509} 4510 4511/// isLegalAddressImmediate - Return true if the integer value or 4512/// GlobalValue can be used as the offset of the target addressing mode. 4513bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const { 4514 // X86 allows a sign-extended 32-bit immediate field. 4515 return (V > -(1LL << 32) && V < (1LL << 32)-1); 4516} 4517 4518bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const { 4519 // In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement 4520 // field unless we are in small code model. 4521 if (Subtarget->is64Bit() && 4522 getTargetMachine().getCodeModel() != CodeModel::Small) 4523 return false; 4524 4525 return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)); 4526} 4527 4528/// isShuffleMaskLegal - Targets can use this to indicate that they only 4529/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4530/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4531/// are assumed to be legal. 4532bool 4533X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 4534 // Only do shuffles on 128-bit vector types for now. 4535 if (MVT::getSizeInBits(VT) == 64) return false; 4536 return (Mask.Val->getNumOperands() <= 4 || 4537 isSplatMask(Mask.Val) || 4538 isPSHUFHW_PSHUFLWMask(Mask.Val) || 4539 X86::isUNPCKLMask(Mask.Val) || 4540 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 4541 X86::isUNPCKHMask(Mask.Val)); 4542} 4543 4544bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 4545 MVT::ValueType EVT, 4546 SelectionDAG &DAG) const { 4547 unsigned NumElts = BVOps.size(); 4548 // Only do shuffles on 128-bit vector types for now. 
4549 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 4550 if (NumElts == 2) return true; 4551 if (NumElts == 4) { 4552 return (isMOVLMask(&BVOps[0], 4) || 4553 isCommutedMOVL(&BVOps[0], 4, true) || 4554 isSHUFPMask(&BVOps[0], 4) || 4555 isCommutedSHUFP(&BVOps[0], 4)); 4556 } 4557 return false; 4558} 4559 4560//===----------------------------------------------------------------------===// 4561// X86 Scheduler Hooks 4562//===----------------------------------------------------------------------===// 4563 4564MachineBasicBlock * 4565X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 4566 MachineBasicBlock *BB) { 4567 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4568 switch (MI->getOpcode()) { 4569 default: assert(false && "Unexpected instr type to insert"); 4570 case X86::CMOV_FR32: 4571 case X86::CMOV_FR64: 4572 case X86::CMOV_V4F32: 4573 case X86::CMOV_V2F64: 4574 case X86::CMOV_V2I64: { 4575 // To "insert" a SELECT_CC instruction, we actually have to insert the 4576 // diamond control-flow pattern. The incoming instruction knows the 4577 // destination vreg to set, the condition code register to branch on, the 4578 // true/false values to select between, and a branch opcode to use. 4579 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4580 ilist<MachineBasicBlock>::iterator It = BB; 4581 ++It; 4582 4583 // thisMBB: 4584 // ... 4585 // TrueVal = ... 4586 // cmpTY ccX, r1, r2 4587 // bCC copy1MBB 4588 // fallthrough --> copy0MBB 4589 MachineBasicBlock *thisMBB = BB; 4590 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 4591 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 4592 unsigned Opc = 4593 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 4594 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 4595 MachineFunction *F = BB->getParent(); 4596 F->getBasicBlockList().insert(It, copy0MBB); 4597 F->getBasicBlockList().insert(It, sinkMBB); 4598 // Update machine-CFG edges by first adding all successors of the current 4599 // block to the new block which will contain the Phi node for the select. 4600 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 4601 e = BB->succ_end(); i != e; ++i) 4602 sinkMBB->addSuccessor(*i); 4603 // Next, remove all successors of the current block, and add the true 4604 // and fallthrough blocks as its successors. 4605 while(!BB->succ_empty()) 4606 BB->removeSuccessor(BB->succ_begin()); 4607 BB->addSuccessor(copy0MBB); 4608 BB->addSuccessor(sinkMBB); 4609 4610 // copy0MBB: 4611 // %FalseValue = ... 4612 // # fallthrough to sinkMBB 4613 BB = copy0MBB; 4614 4615 // Update machine-CFG edges 4616 BB->addSuccessor(sinkMBB); 4617 4618 // sinkMBB: 4619 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4620 // ... 4621 BB = sinkMBB; 4622 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 4623 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4624 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4625 4626 delete MI; // The pseudo instruction is gone now. 4627 return BB; 4628 } 4629 4630 case X86::FP_TO_INT16_IN_MEM: 4631 case X86::FP_TO_INT32_IN_MEM: 4632 case X86::FP_TO_INT64_IN_MEM: { 4633 // Change the floating point control register to use "round towards zero" 4634 // mode when truncating to an integer value. 
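    // The sequence: spill the current control word (FNSTCW), keep a copy in a
    // virtual register, overwrite the slot with 0xC7F (rounding control set
    // to round-to-zero) and reload it (FLDCW), write the saved word back to
    // the slot, issue the FpIST store, then reload the original word to
    // restore the previous rounding mode.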
4635 MachineFunction *F = BB->getParent(); 4636 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 4637 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 4638 4639 // Load the old value of the high byte of the control word... 4640 unsigned OldCW = 4641 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass); 4642 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 4643 4644 // Set the high part to be round to zero... 4645 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 4646 .addImm(0xC7F); 4647 4648 // Reload the modified control word now... 4649 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 4650 4651 // Restore the memory image of control word to original value 4652 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 4653 .addReg(OldCW); 4654 4655 // Get the X86 opcode to use. 4656 unsigned Opc; 4657 switch (MI->getOpcode()) { 4658 default: assert(0 && "illegal opcode!"); 4659 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break; 4660 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break; 4661 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break; 4662 } 4663 4664 X86AddressMode AM; 4665 MachineOperand &Op = MI->getOperand(0); 4666 if (Op.isRegister()) { 4667 AM.BaseType = X86AddressMode::RegBase; 4668 AM.Base.Reg = Op.getReg(); 4669 } else { 4670 AM.BaseType = X86AddressMode::FrameIndexBase; 4671 AM.Base.FrameIndex = Op.getFrameIndex(); 4672 } 4673 Op = MI->getOperand(1); 4674 if (Op.isImmediate()) 4675 AM.Scale = Op.getImm(); 4676 Op = MI->getOperand(2); 4677 if (Op.isImmediate()) 4678 AM.IndexReg = Op.getImm(); 4679 Op = MI->getOperand(3); 4680 if (Op.isGlobalAddress()) { 4681 AM.GV = Op.getGlobal(); 4682 } else { 4683 AM.Disp = Op.getImm(); 4684 } 4685 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 4686 .addReg(MI->getOperand(4).getReg()); 4687 4688 // Reload the original control word now. 4689 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 4690 4691 delete MI; // The pseudo instruction is gone now. 4692 return BB; 4693 } 4694 } 4695} 4696 4697//===----------------------------------------------------------------------===// 4698// X86 Optimization Hooks 4699//===----------------------------------------------------------------------===// 4700 4701void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 4702 uint64_t Mask, 4703 uint64_t &KnownZero, 4704 uint64_t &KnownOne, 4705 unsigned Depth) const { 4706 unsigned Opc = Op.getOpcode(); 4707 assert((Opc >= ISD::BUILTIN_OP_END || 4708 Opc == ISD::INTRINSIC_WO_CHAIN || 4709 Opc == ISD::INTRINSIC_W_CHAIN || 4710 Opc == ISD::INTRINSIC_VOID) && 4711 "Should use MaskedValueIsZero if you don't know whether Op" 4712 " is a target node!"); 4713 4714 KnownZero = KnownOne = 0; // Don't know anything. 4715 switch (Opc) { 4716 default: break; 4717 case X86ISD::SETCC: 4718 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 4719 break; 4720 } 4721} 4722 4723/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4724/// element of the result of the vector shuffle. 4725static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 4726 MVT::ValueType VT = N->getValueType(0); 4727 SDOperand PermMask = N->getOperand(2); 4728 unsigned NumElems = PermMask.getNumOperands(); 4729 SDOperand V = (i < NumElems) ? 
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(),
                               DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset = 0;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
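    // Negative frame indices denote fixed objects (e.g. incoming argument
    // slots).  Fixed objects carry no explicit alignment, but their offsets
    // from the incoming stack pointer are known, so 16-byte alignment can be
    // deduced from the stack alignment together with that offset.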
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}


/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT)/8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  if (isAlign16) {
    LoadSDNode *LD = cast<LoadSDNode>(Base);
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset());
  } else {
    // Just use movups, it's shorter.
    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
    SmallVector<SDOperand, 3> Ops;
    Ops.push_back(Base->getOperand(0));
    Ops.push_back(Base->getOperand(1));
    Ops.push_back(Base->getOperand(2));
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
  }
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE2 support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE:  // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT:  // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
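        // The select operands are commuted relative to the compare operands,
        // so a "greater" comparison now selects the smaller value and vice
        // versa; min/max are therefore swapped with respect to the block
        // above.  As before, the UnsafeFPMath-guarded cases agree with the
        // fall-through cases except when X == Y or the operands are
        // unordered, where SSE min/max may return a differently signed zero
        // or propagate a different NaN.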
        switch (CC) {
        default: break;
        case ISD::SETOGT:  // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }

  }

  return SDOperand();
}


SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE:
    return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:
    return PerformSELECTCombine(N, DAG, Subtarget);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  case 'A':
  case 'r':
  case 'R':
  case 'l':
  case 'q':
  case 'Q':
  case 'x':
  case 'Y':
    return C_RegisterClass;
  default: return TargetLowering::getConstraintType(ConstraintLetter);
  }
}

/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand X86TargetLowering::
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
  switch (Constraint) {
  default: break;
  case 'i':
    // Literal immediates are always ok.
    if (isa<ConstantSDNode>(Op)) return Op;

    // If we are in non-pic codegen mode, we allow the address of a global to
    // be used with 'i'.
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
        return SDOperand(0, 0);

      if (GA->getOpcode() != ISD::TargetGlobalAddress)
        Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                        GA->getOffset());
      return Op;
    }

    // Otherwise, not valid for this mode.
    return SDOperand(0, 0);
  }
  return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}


std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
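    // These letters mirror GCC's x86 machine constraints: e.g. 'A' names the
    // EAX:EDX register pair used for wide results, and 'q'/'Q' restrict the
    // operand to registers whose low byte is addressable (a/b/c/d in 32-bit
    // mode).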
    switch (Constraint[0]) {       // GCC X86 Constraint Letters
    default: break;                // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
                                     X86::RSI, X86::RDI, X86::RBP, X86::RSP,
                                     X86::R8,  X86::R9,  X86::R10, X86::R11,
                                     X86::R12, X86::R13, X86::R14, X86::R15, 0);
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, X86::SP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
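  // The remapping below walks the 16-bit register to its sub- or
  // super-register at the requested width, e.g. AX -> AL for i8 or AX -> EAX
  // for i32, keeping the register class consistent with the operand type.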
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}