X86ISelLowering.cpp revision 1242d2809311db22712e2dededb4d6904948f45f
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // The MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSE) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, the f80 case is not.
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode the f32 and f64
  // cases are Legal; f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, the f80 case is not.
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSE && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient by using movd/movq.
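  // (Expand for these BIT_CONVERTs means the legalizer goes through a stack
  // slot with a store and reload, which is the cost the movd/movq TODO above
  // would avoid.)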
  if (!X86ScalarSSE) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Expand);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // The X86 ret instruction may pop the stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
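  // These address node types are custom lowered so that PIC targets such as
  // Darwin can emit the appropriate stub or GOT-relative addressing for them.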
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0)); // xorps / xorpd

    // Conversions to long double (in X87) go through memory.
    setConvertAction(MVT::f32, MVT::f80, Expand);
    setConvertAction(MVT::f64, MVT::f80, Expand);

    // Conversions from long double (in X87) go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations need to go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f64, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,      MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN,  MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // First set operation action for all vector types to expand.  Then we
  // will selectively turn on ones that can be effectively codegen'd.
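  // The MMX/SSE blocks below then re-enable only the operations that the
  // current subtarget actually supports.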
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetic

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
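    // Bitwise ops are bit-parallel, so the v8i8/v4i16/v2i32 forms above are
    // all carried out as a single v1i64 operation.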
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,  MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,  MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,  MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,  MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,  MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,  MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,  MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,  MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,  MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT,MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info.  Plus, the values should
  // be smaller when we are in optimizing-for-size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// LowerRET - Lower an ISD::RET node.
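/// The node's operands are the chain followed by a (value, signness) pair for
/// each value being returned, which is why the operand count asserted below
/// is always odd.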
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
  }

  SDOperand Chain = Op.getOperand(0);
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.
    if (X86ScalarSSE) {
      SDOperand MemLoc;

      // If this is a load into a scalar-sse value, don't store the loaded
      // value back to the stack only to reload it: just reuse the load's
      // memory operand directly.
      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain  = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into the top of the stack.
        unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
      SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
      Chain = Value.getValue(1);
    }

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes
/// that Chain/InFlag are the input chain/flag to use, and that TheCall is the
/// call being lowered.  It returns an SDNode with the same number of values
/// as the ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.
    SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
    // an XMM register.
    if (X86ScalarSSE) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT.  This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks.  When the stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      SDOperand Ops[] = {
        Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
      };
      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
      RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }
    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is the standard for many Windows API
//  routines.  It differs from the C calling convention only slightly: the
//  callee cleans up the stack instead of the caller, and symbols are also
//  decorated (e.g. _name@N, where N is the number of argument bytes).  It
//  doesn't support any vector arguments.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value.  It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset());
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());

  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();

  if (Flags & ISD::ParamFlags::ByVal)
    return FIN;
  else
    return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
}

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                               bool isStdCall) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8- or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();

  ArgValues.push_back(Root);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);

  if (isStdCall && !isVarArg) {
    BytesToPopOnReturn  = StackSize;  // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0;  // Callee pops nothing.

    // If this is an sret function, the return should pop the hidden pointer.
    if (NumArgs &&
        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
         ISD::ParamFlags::StructReturn))
      BytesToPopOnReturn = 4;

    BytesCallerReserves = StackSize;
  }

  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain  = Op.getOperand(0);
  bool isVarArg    = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall  = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps  = (Op.getNumOperands() - 5) / 2;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
    }
  }

  // If the first argument is an sret pointer, remember it.
  bool isSRet = NumOps &&
                (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
                 ISD::ParamFlags::StructReturn);

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
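  // The flag result glues each copy to the next and ultimately to the call,
  // so the scheduler cannot insert anything between them.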
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls via the PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall) {
    if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
    else
      NumBytesForCalleeToPush = NumBytes;
  } else {
    // If this is a call to a struct-return function, the callee pops the
    // hidden struct pointer, so we have to push it back.  This is common
    // for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                   FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return-value conventions as the C calling
// convention.
//
// This calling convention always arranges for the callee-pop value to be
// 8n+4 bytes, which is needed for tail-recursion elimination and stack
// alignment reasons.
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8- or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the frame size is 8n+4 bytes so that the start of the
    // arguments, and the arguments after the return address has been pushed,
    // stay aligned.
    if ((StackSize & 7) == 0)
      StackSize += 4;
  }

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
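  // (0xAAAAAAA is a deliberately recognizable sentinel; neither index should
  // ever actually be used on this path.)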
  BytesToPopOnReturn = StackSize;   // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                           ISD::ParamFlags::ByValAlignOffs);

    assert(Align >= 8);
    unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                    ISD::ParamFlags::ByValSizeOffs;

    SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
    SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);

    return DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, PtrOff, Arg, SizeNode,
                       AlignNode);
  } else {
    return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
  }
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain  = Op.getOperand(0);
  bool isTailCall  = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  bool isVarArg    = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the frame size is 8n+4 bytes so that the start of the
    // arguments, and the arguments after the return address has been pushed,
    // stay aligned.
    if ((NumBytes & 7) == 0)
      NumBytes += 4;
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls via the PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
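  // CALLSEQ_END carries the number of bytes allocated for the call and the
  // number the callee pops; fastcall is callee-pop, so both constants below
  // are NumBytes.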
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                 X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8- or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

    // Store the integer parameter registers.
    SmallVector<SDOperand, 8> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset,
                                                getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = StackSize;

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain  = Op.getOperand(0);
  bool isVarArg    = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall  = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
1371 SmallVector<CCValAssign, 16> ArgLocs; 1372 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1373 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C); 1374 1375 // Get a count of how many bytes are to be pushed on the stack. 1376 unsigned NumBytes = CCInfo.getNextStackOffset(); 1377 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1378 1379 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1380 SmallVector<SDOperand, 8> MemOpChains; 1381 1382 SDOperand StackPtr; 1383 1384 // Walk the register/memloc assignments, inserting copies/loads. 1385 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1386 CCValAssign &VA = ArgLocs[i]; 1387 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1388 1389 // Promote the value if needed. 1390 switch (VA.getLocInfo()) { 1391 default: assert(0 && "Unknown loc info!"); 1392 case CCValAssign::Full: break; 1393 case CCValAssign::SExt: 1394 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1395 break; 1396 case CCValAssign::ZExt: 1397 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1398 break; 1399 case CCValAssign::AExt: 1400 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1401 break; 1402 } 1403 1404 if (VA.isRegLoc()) { 1405 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1406 } else { 1407 assert(VA.isMemLoc()); 1408 if (StackPtr.Val == 0) 1409 StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy()); 1410 1411 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1412 Arg)); 1413 } 1414 } 1415 1416 if (!MemOpChains.empty()) 1417 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1418 &MemOpChains[0], MemOpChains.size()); 1419 1420 // Build a sequence of copy-to-reg nodes chained together with token chain 1421 // and flag operands which copy the outgoing args into registers. 1422 SDOperand InFlag; 1423 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1424 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1425 InFlag); 1426 InFlag = Chain.getValue(1); 1427 } 1428 1429 if (isVarArg) { 1430 // From AMD64 ABI document: 1431 // For calls that may call functions that use varargs or stdargs 1432 // (prototype-less calls or calls to functions containing ellipsis (...) in 1433 // the declaration) %al is used as hidden argument to specify the number 1434 // of SSE registers used. The contents of %al do not need to match exactly 1435 // the number of registers, but must be an ubound on the number of SSE 1436 // registers used and is in the range 0 - 8 inclusive. 1437 1438 // Count the number of XMM registers allocated. 1439 static const unsigned XMMArgRegs[] = { 1440 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1441 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1442 }; 1443 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1444 1445 Chain = DAG.getCopyToReg(Chain, X86::AL, 1446 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1447 InFlag = Chain.getValue(1); 1448 } 1449 1450 // If the callee is a GlobalAddress node (quite common, every direct call is) 1451 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1452 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1453 // We should use extra load for direct calls to dllimported functions in 1454 // non-JIT mode. 
1455 if (getTargetMachine().getCodeModel() != CodeModel::Large 1456 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1457 getTargetMachine(), true)) 1458 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1459 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1460 if (getTargetMachine().getCodeModel() != CodeModel::Large) 1461 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1462 1463 // Returns a chain & a flag for retval copy to use. 1464 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1465 SmallVector<SDOperand, 8> Ops; 1466 Ops.push_back(Chain); 1467 Ops.push_back(Callee); 1468 1469 // Add argument registers to the end of the list so that they are known live 1470 // into the call. 1471 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1472 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1473 RegsToPass[i].second.getValueType())); 1474 1475 if (InFlag.Val) 1476 Ops.push_back(InFlag); 1477 1478 // FIXME: Do not generate X86ISD::TAILCALL for now. 1479 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, 1480 NodeTys, &Ops[0], Ops.size()); 1481 InFlag = Chain.getValue(1); 1482 1483 // Returns a flag for retval copy to use. 1484 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1485 Ops.clear(); 1486 Ops.push_back(Chain); 1487 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1488 Ops.push_back(DAG.getConstant(0, getPointerTy())); 1489 Ops.push_back(InFlag); 1490 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1491 InFlag = Chain.getValue(1); 1492 1493 // Handle result values, copying them out of physregs into vregs that we 1494 // return. 1495 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 1496} 1497 1498 1499//===----------------------------------------------------------------------===// 1500// Other Lowering Hooks 1501//===----------------------------------------------------------------------===// 1502 1503 1504SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 1505 MachineFunction &MF = DAG.getMachineFunction(); 1506 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1507 int ReturnAddrIndex = FuncInfo->getRAIndex(); 1508 1509 if (ReturnAddrIndex == 0) { 1510 // Set up a frame object for the return address. 1511 if (Subtarget->is64Bit()) 1512 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8); 1513 else 1514 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4); 1515 1516 FuncInfo->setRAIndex(ReturnAddrIndex); 1517 } 1518 1519 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 1520} 1521 1522 1523 1524/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86 1525/// specific condition code. It returns a false if it cannot do a direct 1526/// translation. X86CC is the translated CondCode. LHS/RHS are modified as 1527/// needed. 1528static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 1529 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, 1530 SelectionDAG &DAG) { 1531 X86CC = X86::COND_INVALID; 1532 if (!isFP) { 1533 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 1534 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 1535 // X > -1 -> X == 0, jump !sign. 1536 RHS = DAG.getConstant(0, RHS.getValueType()); 1537 X86CC = X86::COND_NS; 1538 return true; 1539 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 1540 // X < 0 -> X == 0, jump on sign. 
1541 X86CC = X86::COND_S;
1542 return true;
1543 } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1544 // X < 1 -> X <= 0
1545 RHS = DAG.getConstant(0, RHS.getValueType());
1546 X86CC = X86::COND_LE;
1547 return true;
1548 }
1549 }
1550
1551 switch (SetCCOpcode) {
1552 default: break;
1553 case ISD::SETEQ: X86CC = X86::COND_E; break;
1554 case ISD::SETGT: X86CC = X86::COND_G; break;
1555 case ISD::SETGE: X86CC = X86::COND_GE; break;
1556 case ISD::SETLT: X86CC = X86::COND_L; break;
1557 case ISD::SETLE: X86CC = X86::COND_LE; break;
1558 case ISD::SETNE: X86CC = X86::COND_NE; break;
1559 case ISD::SETULT: X86CC = X86::COND_B; break;
1560 case ISD::SETUGT: X86CC = X86::COND_A; break;
1561 case ISD::SETULE: X86CC = X86::COND_BE; break;
1562 case ISD::SETUGE: X86CC = X86::COND_AE; break;
1563 }
1564 } else {
1565 // On a floating point condition, the flags are set as follows:
1566 // ZF PF CF op
1567 // 0 | 0 | 0 | X > Y
1568 // 0 | 0 | 1 | X < Y
1569 // 1 | 0 | 0 | X == Y
1570 // 1 | 1 | 1 | unordered
1571 bool Flip = false;
1572 switch (SetCCOpcode) {
1573 default: break;
1574 case ISD::SETUEQ:
1575 case ISD::SETEQ: X86CC = X86::COND_E; break;
1576 case ISD::SETOLT: Flip = true; // Fallthrough
1577 case ISD::SETOGT:
1578 case ISD::SETGT: X86CC = X86::COND_A; break;
1579 case ISD::SETOLE: Flip = true; // Fallthrough
1580 case ISD::SETOGE:
1581 case ISD::SETGE: X86CC = X86::COND_AE; break;
1582 case ISD::SETUGT: Flip = true; // Fallthrough
1583 case ISD::SETULT:
1584 case ISD::SETLT: X86CC = X86::COND_B; break;
1585 case ISD::SETUGE: Flip = true; // Fallthrough
1586 case ISD::SETULE:
1587 case ISD::SETLE: X86CC = X86::COND_BE; break;
1588 case ISD::SETONE:
1589 case ISD::SETNE: X86CC = X86::COND_NE; break;
1590 case ISD::SETUO: X86CC = X86::COND_P; break;
1591 case ISD::SETO: X86CC = X86::COND_NP; break;
1592 }
1593 if (Flip)
1594 std::swap(LHS, RHS);
1595 }
1596
1597 return X86CC != X86::COND_INVALID;
1598 }
1599
1600 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
1601 /// code. The current x86 ISA includes the following FP cmov instructions:
1602 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1603 static bool hasFPCMov(unsigned X86CC) {
1604 switch (X86CC) {
1605 default:
1606 return false;
1607 case X86::COND_B:
1608 case X86::COND_BE:
1609 case X86::COND_E:
1610 case X86::COND_P:
1611 case X86::COND_A:
1612 case X86::COND_AE:
1613 case X86::COND_NE:
1614 case X86::COND_NP:
1615 return true;
1616 }
1617 }
1618
1619 /// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
1620 /// true if Op is undef or if its value falls within the range [Low, Hi).
1621 static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
1622 if (Op.getOpcode() == ISD::UNDEF)
1623 return true;
1624
1625 unsigned Val = cast<ConstantSDNode>(Op)->getValue();
1626 return (Val >= Low && Val < Hi);
1627 }
1628
1629 /// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
1630 /// true if Op is undef or if its value is equal to the specified value.
1631 static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
1632 if (Op.getOpcode() == ISD::UNDEF)
1633 return true;
1634 return cast<ConstantSDNode>(Op)->getValue() == Val;
1635 }
1636
1637 /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1638 /// specifies a shuffle of elements that is suitable for input to PSHUFD.
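/// A PSHUFD mask has 2 or 4 elements, all of which must index into the
/// first vector, e.g. <2, 1, 0, 3>.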
1639bool X86::isPSHUFDMask(SDNode *N) { 1640 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1641 1642 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 1643 return false; 1644 1645 // Check if the value doesn't reference the second vector. 1646 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1647 SDOperand Arg = N->getOperand(i); 1648 if (Arg.getOpcode() == ISD::UNDEF) continue; 1649 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1650 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 1651 return false; 1652 } 1653 1654 return true; 1655} 1656 1657/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1658/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1659bool X86::isPSHUFHWMask(SDNode *N) { 1660 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1661 1662 if (N->getNumOperands() != 8) 1663 return false; 1664 1665 // Lower quadword copied in order. 1666 for (unsigned i = 0; i != 4; ++i) { 1667 SDOperand Arg = N->getOperand(i); 1668 if (Arg.getOpcode() == ISD::UNDEF) continue; 1669 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1670 if (cast<ConstantSDNode>(Arg)->getValue() != i) 1671 return false; 1672 } 1673 1674 // Upper quadword shuffled. 1675 for (unsigned i = 4; i != 8; ++i) { 1676 SDOperand Arg = N->getOperand(i); 1677 if (Arg.getOpcode() == ISD::UNDEF) continue; 1678 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1679 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1680 if (Val < 4 || Val > 7) 1681 return false; 1682 } 1683 1684 return true; 1685} 1686 1687/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 1688/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 1689bool X86::isPSHUFLWMask(SDNode *N) { 1690 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1691 1692 if (N->getNumOperands() != 8) 1693 return false; 1694 1695 // Upper quadword copied in order. 1696 for (unsigned i = 4; i != 8; ++i) 1697 if (!isUndefOrEqual(N->getOperand(i), i)) 1698 return false; 1699 1700 // Lower quadword shuffled. 1701 for (unsigned i = 0; i != 4; ++i) 1702 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 1703 return false; 1704 1705 return true; 1706} 1707 1708/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 1709/// specifies a shuffle of elements that is suitable for input to SHUFP*. 1710static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 1711 if (NumElems != 2 && NumElems != 4) return false; 1712 1713 unsigned Half = NumElems / 2; 1714 for (unsigned i = 0; i < Half; ++i) 1715 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 1716 return false; 1717 for (unsigned i = Half; i < NumElems; ++i) 1718 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 1719 return false; 1720 1721 return true; 1722} 1723 1724bool X86::isSHUFPMask(SDNode *N) { 1725 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1726 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 1727} 1728 1729/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 1730/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 1731/// half elements to come from vector 1 (which would equal the dest.) and 1732/// the upper half to come from vector 2. 
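/// For example, <4, 5, 0, 1> is a commuted SHUFP mask for 4 elements;
/// commuting the two operands turns it into the SHUFP-compatible mask
/// <0, 1, 4, 5>.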
1733static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 1734 if (NumOps != 2 && NumOps != 4) return false; 1735 1736 unsigned Half = NumOps / 2; 1737 for (unsigned i = 0; i < Half; ++i) 1738 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 1739 return false; 1740 for (unsigned i = Half; i < NumOps; ++i) 1741 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 1742 return false; 1743 return true; 1744} 1745 1746static bool isCommutedSHUFP(SDNode *N) { 1747 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1748 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 1749} 1750 1751/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 1752/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 1753bool X86::isMOVHLPSMask(SDNode *N) { 1754 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1755 1756 if (N->getNumOperands() != 4) 1757 return false; 1758 1759 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 1760 return isUndefOrEqual(N->getOperand(0), 6) && 1761 isUndefOrEqual(N->getOperand(1), 7) && 1762 isUndefOrEqual(N->getOperand(2), 2) && 1763 isUndefOrEqual(N->getOperand(3), 3); 1764} 1765 1766/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 1767/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 1768/// <2, 3, 2, 3> 1769bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 1770 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1771 1772 if (N->getNumOperands() != 4) 1773 return false; 1774 1775 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 1776 return isUndefOrEqual(N->getOperand(0), 2) && 1777 isUndefOrEqual(N->getOperand(1), 3) && 1778 isUndefOrEqual(N->getOperand(2), 2) && 1779 isUndefOrEqual(N->getOperand(3), 3); 1780} 1781 1782/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 1783/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 1784bool X86::isMOVLPMask(SDNode *N) { 1785 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1786 1787 unsigned NumElems = N->getNumOperands(); 1788 if (NumElems != 2 && NumElems != 4) 1789 return false; 1790 1791 for (unsigned i = 0; i < NumElems/2; ++i) 1792 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 1793 return false; 1794 1795 for (unsigned i = NumElems/2; i < NumElems; ++i) 1796 if (!isUndefOrEqual(N->getOperand(i), i)) 1797 return false; 1798 1799 return true; 1800} 1801 1802/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 1803/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 1804/// and MOVLHPS. 1805bool X86::isMOVHPMask(SDNode *N) { 1806 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1807 1808 unsigned NumElems = N->getNumOperands(); 1809 if (NumElems != 2 && NumElems != 4) 1810 return false; 1811 1812 for (unsigned i = 0; i < NumElems/2; ++i) 1813 if (!isUndefOrEqual(N->getOperand(i), i)) 1814 return false; 1815 1816 for (unsigned i = 0; i < NumElems/2; ++i) { 1817 SDOperand Arg = N->getOperand(i + NumElems/2); 1818 if (!isUndefOrEqual(Arg, i + NumElems)) 1819 return false; 1820 } 1821 1822 return true; 1823} 1824 1825/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 1826/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
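/// For 4 elements the canonical unpckl mask is <0, 4, 1, 5>, interleaving
/// the low halves of the two input vectors.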
1827 static bool isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
1828 bool V2IsSplat = false) {
1829 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
1830 return false;
1831
1832 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
1833 SDOperand BitI = Elts[i];
1834 SDOperand BitI1 = Elts[i+1];
1835 if (!isUndefOrEqual(BitI, j))
1836 return false;
1837 if (V2IsSplat) {
1838 if (!isUndefOrEqual(BitI1, NumElts))
1839 return false;
1840 } else {
1841 if (!isUndefOrEqual(BitI1, j + NumElts))
1842 return false;
1843 }
1844 }
1845
1846 return true;
1847 }
1848
1849 bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
1850 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1851 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
1852 }
1853
1854 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
1855 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
1856 static bool isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
1857 bool V2IsSplat = false) {
1858 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
1859 return false;
1860
1861 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
1862 SDOperand BitI = Elts[i];
1863 SDOperand BitI1 = Elts[i+1];
1864 if (!isUndefOrEqual(BitI, j + NumElts/2))
1865 return false;
1866 if (V2IsSplat) {
1867 if (!isUndefOrEqual(BitI1, NumElts))
1868 return false;
1869 } else {
1870 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
1871 return false;
1872 }
1873 }
1874
1875 return true;
1876 }
1877
1878 bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
1879 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1880 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
1881 }
1882
1883 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
1884 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
1885 /// <0, 0, 1, 1>
1886 bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
1887 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1888
1889 unsigned NumElems = N->getNumOperands();
1890 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
1891 return false;
1892
1893 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
1894 SDOperand BitI = N->getOperand(i);
1895 SDOperand BitI1 = N->getOperand(i+1);
1896
1897 if (!isUndefOrEqual(BitI, j))
1898 return false;
1899 if (!isUndefOrEqual(BitI1, j))
1900 return false;
1901 }
1902
1903 return true;
1904 }
1905
1906 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
1907 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
1908 /// <2, 2, 3, 3>
1909 bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
1910 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1911
1912 unsigned NumElems = N->getNumOperands();
1913 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
1914 return false;
1915
1916 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
1917 SDOperand BitI = N->getOperand(i);
1918 SDOperand BitI1 = N->getOperand(i + 1);
1919
1920 if (!isUndefOrEqual(BitI, j))
1921 return false;
1922 if (!isUndefOrEqual(BitI1, j))
1923 return false;
1924 }
1925
1926 return true;
1927 }
1928
1929 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
1930 /// specifies a shuffle of elements that is suitable for input to MOVSS,
1931 /// MOVSD, and MOVD, i.e. setting the lowest element.
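/// For example, <4, 1, 2, 3>: element 0 is taken from V2 and the remaining
/// elements pass through from V1.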
1932 static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
1933 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
1934 return false;
1935
1936 if (!isUndefOrEqual(Elts[0], NumElts))
1937 return false;
1938
1939 for (unsigned i = 1; i < NumElts; ++i) {
1940 if (!isUndefOrEqual(Elts[i], i))
1941 return false;
1942 }
1943
1944 return true;
1945 }
1946
1947 bool X86::isMOVLMask(SDNode *N) {
1948 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1949 return ::isMOVLMask(N->op_begin(), N->getNumOperands());
1950 }
1951
1952 /// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
1953 /// of what x86 movss wants: the lowest element must be the lowest element
1954 /// of vector 2, and the other elements must come from vector 1 in order.
1955 static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
1956 bool V2IsSplat = false,
1957 bool V2IsUndef = false) {
1958 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
1959 return false;
1960
1961 if (!isUndefOrEqual(Ops[0], 0))
1962 return false;
1963
1964 for (unsigned i = 1; i < NumOps; ++i) {
1965 SDOperand Arg = Ops[i];
1966 if (!(isUndefOrEqual(Arg, i+NumOps) ||
1967 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
1968 (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
1969 return false;
1970 }
1971
1972 return true;
1973 }
1974
1975 static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
1976 bool V2IsUndef = false) {
1977 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1978 return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
1979 V2IsSplat, V2IsUndef);
1980 }
1981
1982 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
1983 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
1984 bool X86::isMOVSHDUPMask(SDNode *N) {
1985 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1986
1987 if (N->getNumOperands() != 4)
1988 return false;
1989
1990 // Expect 1, 1, 3, 3
1991 for (unsigned i = 0; i < 2; ++i) {
1992 SDOperand Arg = N->getOperand(i);
1993 if (Arg.getOpcode() == ISD::UNDEF) continue;
1994 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1995 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1996 if (Val != 1) return false;
1997 }
1998
1999 bool HasHi = false;
2000 for (unsigned i = 2; i < 4; ++i) {
2001 SDOperand Arg = N->getOperand(i);
2002 if (Arg.getOpcode() == ISD::UNDEF) continue;
2003 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2004 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2005 if (Val != 3) return false;
2006 HasHi = true;
2007 }
2008
2009 // Don't use movshdup if it can be done with a shufps.
2010 return HasHi;
2011 }
2012
2013 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2014 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
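/// That is, the mask <0, 0, 2, 2>, which duplicates the even elements.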
2015 bool X86::isMOVSLDUPMask(SDNode *N) {
2016 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2017
2018 if (N->getNumOperands() != 4)
2019 return false;
2020
2021 // Expect 0, 0, 2, 2
2022 for (unsigned i = 0; i < 2; ++i) {
2023 SDOperand Arg = N->getOperand(i);
2024 if (Arg.getOpcode() == ISD::UNDEF) continue;
2025 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2026 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2027 if (Val != 0) return false;
2028 }
2029
2030 bool HasHi = false;
2031 for (unsigned i = 2; i < 4; ++i) {
2032 SDOperand Arg = N->getOperand(i);
2033 if (Arg.getOpcode() == ISD::UNDEF) continue;
2034 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2035 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2036 if (Val != 2) return false;
2037 HasHi = true;
2038 }
2039
2040 // Don't use movsldup if it can be done with a shufps.
2041 return HasHi;
2042 }
2043
2044 /// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2045 /// specifies an identity operation on the LHS or RHS.
2046 static bool isIdentityMask(SDNode *N, bool RHS = false) {
2047 unsigned NumElems = N->getNumOperands();
2048 for (unsigned i = 0; i < NumElems; ++i)
2049 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2050 return false;
2051 return true;
2052 }
2053
2054 /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2055 /// a splat of a single element.
2056 static bool isSplatMask(SDNode *N) {
2057 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2058
2059 // This is a splat operation if each element of the permute is the same, and
2060 // if the value doesn't reference the second vector.
2061 unsigned NumElems = N->getNumOperands();
2062 SDOperand ElementBase;
2063 unsigned i = 0;
2064 for (; i != NumElems; ++i) {
2065 SDOperand Elt = N->getOperand(i);
2066 if (isa<ConstantSDNode>(Elt)) {
2067 ElementBase = Elt;
2068 break;
2069 }
2070 }
2071
2072 if (!ElementBase.Val)
2073 return false;
2074
2075 for (; i != NumElems; ++i) {
2076 SDOperand Arg = N->getOperand(i);
2077 if (Arg.getOpcode() == ISD::UNDEF) continue;
2078 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2079 if (Arg != ElementBase) return false;
2080 }
2081
2082 // Make sure it is a splat of the first vector operand.
2083 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2084 }
2085
2086 /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2087 /// a splat of a single element and it's a 2 or 4 element mask.
2088 bool X86::isSplatMask(SDNode *N) {
2089 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2090
2091 // We can only splat 64-bit and 32-bit quantities with a single instruction.
2092 if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2093 return false;
2094 return ::isSplatMask(N);
2095 }
2096
2097 /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2098 /// specifies a splat of element zero.
2099 bool X86::isSplatLoMask(SDNode *N) {
2100 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2101
2102 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2103 if (!isUndefOrEqual(N->getOperand(i), 0))
2104 return false;
2105 return true;
2106 }
2107
2108 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2109 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2110 /// instructions.
2111 unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2112 unsigned NumOperands = N->getNumOperands();
2113 unsigned Shift = (NumOperands == 4) ? 2 : 1;
2114 unsigned Mask = 0;
2115 for (unsigned i = 0; i < NumOperands; ++i) {
2116 unsigned Val = 0;
2117 SDOperand Arg = N->getOperand(NumOperands-i-1);
2118 if (Arg.getOpcode() != ISD::UNDEF)
2119 Val = cast<ConstantSDNode>(Arg)->getValue();
2120 if (Val >= NumOperands) Val -= NumOperands;
2121 Mask |= Val;
2122 if (i != NumOperands - 1)
2123 Mask <<= Shift;
2124 }
2125
2126 return Mask;
2127 }
2128
2129 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2130 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2131 /// instructions.
2132 unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2133 unsigned Mask = 0;
2134 // 8 nodes, but we only care about the last 4.
2135 for (unsigned i = 7; i >= 4; --i) {
2136 unsigned Val = 0;
2137 SDOperand Arg = N->getOperand(i);
2138 if (Arg.getOpcode() != ISD::UNDEF)
2139 Val = cast<ConstantSDNode>(Arg)->getValue() - 4;
2140 Mask |= Val;
2141 if (i != 4)
2142 Mask <<= 2;
2143 }
2144
2145 return Mask;
2146 }
2147
2148 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2149 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2150 /// instructions.
2151 unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2152 unsigned Mask = 0;
2153 // 8 nodes, but we only care about the first 4.
2154 for (int i = 3; i >= 0; --i) {
2155 unsigned Val = 0;
2156 SDOperand Arg = N->getOperand(i);
2157 if (Arg.getOpcode() != ISD::UNDEF)
2158 Val = cast<ConstantSDNode>(Arg)->getValue();
2159 Mask |= Val;
2160 if (i != 0)
2161 Mask <<= 2;
2162 }
2163
2164 return Mask;
2165 }
2166
2167 /// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2168 /// specifies an 8 element shuffle that can be broken into a pair of
2169 /// PSHUFHW and PSHUFLW.
2170 static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2171 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2172
2173 if (N->getNumOperands() != 8)
2174 return false;
2175
2176 // Lower quadword shuffled.
2177 for (unsigned i = 0; i != 4; ++i) {
2178 SDOperand Arg = N->getOperand(i);
2179 if (Arg.getOpcode() == ISD::UNDEF) continue;
2180 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2181 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2182 if (Val >= 4)
2183 return false;
2184 }
2185
2186 // Upper quadword shuffled.
2187 for (unsigned i = 4; i != 8; ++i) {
2188 SDOperand Arg = N->getOperand(i);
2189 if (Arg.getOpcode() == ISD::UNDEF) continue;
2190 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2191 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2192 if (Val < 4 || Val > 7)
2193 return false;
2194 }
2195
2196 return true;
2197 }
2198
2199 /// CommuteVectorShuffle - Swap vector_shuffle operands as well as
2200 /// values in their permute mask.
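/// For example, vector_shuffle V1, V2, <0, 1, 4, 5> becomes
/// vector_shuffle V2, V1, <4, 5, 0, 1>.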
2201 static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
2202 SDOperand &V2, SDOperand &Mask,
2203 SelectionDAG &DAG) {
2204 MVT::ValueType VT = Op.getValueType();
2205 MVT::ValueType MaskVT = Mask.getValueType();
2206 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
2207 unsigned NumElems = Mask.getNumOperands();
2208 SmallVector<SDOperand, 8> MaskVec;
2209
2210 for (unsigned i = 0; i != NumElems; ++i) {
2211 SDOperand Arg = Mask.getOperand(i);
2212 if (Arg.getOpcode() == ISD::UNDEF) {
2213 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2214 continue;
2215 }
2216 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2217 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2218 if (Val < NumElems)
2219 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2220 else
2221 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2222 }
2223
2224 std::swap(V1, V2);
2225 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2226 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2227 }
2228
2229 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
2230 /// match movhlps. The lower half elements should come from the upper half of
2231 /// V1 (and in order), and the upper half elements should come from the upper
2232 /// half of V2 (and in order).
2233 static bool ShouldXformToMOVHLPS(SDNode *Mask) {
2234 unsigned NumElems = Mask->getNumOperands();
2235 if (NumElems != 4)
2236 return false;
2237 for (unsigned i = 0, e = 2; i != e; ++i)
2238 if (!isUndefOrEqual(Mask->getOperand(i), i+2))
2239 return false;
2240 for (unsigned i = 2; i != 4; ++i)
2241 if (!isUndefOrEqual(Mask->getOperand(i), i+4))
2242 return false;
2243 return true;
2244 }
2245
2246 /// isScalarLoadToVector - Returns true if the node is a scalar load that
2247 /// is promoted to a vector.
2248 static inline bool isScalarLoadToVector(SDNode *N) {
2249 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
2250 N = N->getOperand(0).Val;
2251 return ISD::isNON_EXTLoad(N);
2252 }
2253 return false;
2254 }
2255
2256 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
2257 /// match movlp{s|d}. The lower half elements should come from the lower half of
2258 /// V1 (and in order), and the upper half elements should come from the upper
2259 /// half of V2 (and in order). And since V1 will become the source of the
2260 /// MOVLP, it must be either a vector load or a scalar load to vector.
2261 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2262 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2263 return false;
2264 // If V2 is a vector load, don't do this transformation. We will try to use
2265 // a load-folding shufps instead.
2266 if (ISD::isNON_EXTLoad(V2))
2267 return false;
2268
2269 unsigned NumElems = Mask->getNumOperands();
2270 if (NumElems != 2 && NumElems != 4)
2271 return false;
2272 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2273 if (!isUndefOrEqual(Mask->getOperand(i), i))
2274 return false;
2275 for (unsigned i = NumElems/2; i != NumElems; ++i)
2276 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2277 return false;
2278 return true;
2279 }
2280
2281 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2282 /// all the same.
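/// e.g. (build_vector X, X, X, X).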
2283 static bool isSplatVector(SDNode *N) {
2284 if (N->getOpcode() != ISD::BUILD_VECTOR)
2285 return false;
2286
2287 SDOperand SplatValue = N->getOperand(0);
2288 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2289 if (N->getOperand(i) != SplatValue)
2290 return false;
2291 return true;
2292 }
2293
2294 /// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2295 /// to an undef.
2296 static bool isUndefShuffle(SDNode *N) {
2297 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2298 return false;
2299
2300 SDOperand V1 = N->getOperand(0);
2301 SDOperand V2 = N->getOperand(1);
2302 SDOperand Mask = N->getOperand(2);
2303 unsigned NumElems = Mask.getNumOperands();
2304 for (unsigned i = 0; i != NumElems; ++i) {
2305 SDOperand Arg = Mask.getOperand(i);
2306 if (Arg.getOpcode() != ISD::UNDEF) {
2307 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2308 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2309 return false;
2310 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2311 return false;
2312 }
2313 }
2314 return true;
2315 }
2316
2317 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
2318 /// constant +0.0.
2319 static inline bool isZeroNode(SDOperand Elt) {
2320 return ((isa<ConstantSDNode>(Elt) &&
2321 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2322 (isa<ConstantFPSDNode>(Elt) &&
2323 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
2324 }
2325
2326 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2327 /// to a zero vector.
2328 static bool isZeroShuffle(SDNode *N) {
2329 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2330 return false;
2331
2332 SDOperand V1 = N->getOperand(0);
2333 SDOperand V2 = N->getOperand(1);
2334 SDOperand Mask = N->getOperand(2);
2335 unsigned NumElems = Mask.getNumOperands();
2336 for (unsigned i = 0; i != NumElems; ++i) {
2337 SDOperand Arg = Mask.getOperand(i);
2338 if (Arg.getOpcode() != ISD::UNDEF) {
2339 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
2340 if (Idx < NumElems) {
2341 unsigned Opc = V1.Val->getOpcode();
2342 if (Opc == ISD::UNDEF)
2343 continue;
2344 if (Opc != ISD::BUILD_VECTOR ||
2345 !isZeroNode(V1.Val->getOperand(Idx)))
2346 return false;
2347 } else if (Idx >= NumElems) {
2348 unsigned Opc = V2.Val->getOpcode();
2349 if (Opc == ISD::UNDEF)
2350 continue;
2351 if (Opc != ISD::BUILD_VECTOR ||
2352 !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
2353 return false;
2354 }
2355 }
2356 }
2357 return true;
2358 }
2359
2360 /// getZeroVector - Returns a vector of specified type with all zero elements.
2361 ///
2362 static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
2363 assert(MVT::isVector(VT) && "Expected a vector type");
2364 unsigned NumElems = MVT::getVectorNumElements(VT);
2365 MVT::ValueType EVT = MVT::getVectorElementType(VT);
2366 bool isFP = MVT::isFloatingPoint(EVT);
2367 SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
2368 SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
2369 return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
2370 }
2371
2372 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
2373 /// that point to V2 point to its first element.
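/// For example, with 4 elements the mask <0, 5, 2, 7> is rewritten to
/// <0, 4, 2, 4>.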
2374static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2375 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2376 2377 bool Changed = false; 2378 SmallVector<SDOperand, 8> MaskVec; 2379 unsigned NumElems = Mask.getNumOperands(); 2380 for (unsigned i = 0; i != NumElems; ++i) { 2381 SDOperand Arg = Mask.getOperand(i); 2382 if (Arg.getOpcode() != ISD::UNDEF) { 2383 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2384 if (Val > NumElems) { 2385 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2386 Changed = true; 2387 } 2388 } 2389 MaskVec.push_back(Arg); 2390 } 2391 2392 if (Changed) 2393 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2394 &MaskVec[0], MaskVec.size()); 2395 return Mask; 2396} 2397 2398/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2399/// operation of specified width. 2400static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2401 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2402 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2403 2404 SmallVector<SDOperand, 8> MaskVec; 2405 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2406 for (unsigned i = 1; i != NumElems; ++i) 2407 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2408 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2409} 2410 2411/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2412/// of specified width. 2413static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2414 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2415 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2416 SmallVector<SDOperand, 8> MaskVec; 2417 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2418 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2419 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2420 } 2421 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2422} 2423 2424/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2425/// of specified width. 2426static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2427 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2428 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2429 unsigned Half = NumElems/2; 2430 SmallVector<SDOperand, 8> MaskVec; 2431 for (unsigned i = 0; i != Half; ++i) { 2432 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2433 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2434 } 2435 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2436} 2437 2438/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 
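/// The vector is repeatedly unpacked with itself until 4 elements remain,
/// then bitcast to v4i32 and splatted with an all-zero shuffle mask.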
2439/// 2440static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2441 SDOperand V1 = Op.getOperand(0); 2442 SDOperand Mask = Op.getOperand(2); 2443 MVT::ValueType VT = Op.getValueType(); 2444 unsigned NumElems = Mask.getNumOperands(); 2445 Mask = getUnpacklMask(NumElems, DAG); 2446 while (NumElems != 4) { 2447 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2448 NumElems >>= 1; 2449 } 2450 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2451 2452 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2453 Mask = getZeroVector(MaskVT, DAG); 2454 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2455 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2456 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2457} 2458 2459/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2460/// vector of zero or undef vector. 2461static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT, 2462 unsigned NumElems, unsigned Idx, 2463 bool isZero, SelectionDAG &DAG) { 2464 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2465 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2466 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2467 SDOperand Zero = DAG.getConstant(0, EVT); 2468 SmallVector<SDOperand, 8> MaskVec(NumElems, Zero); 2469 MaskVec[Idx] = DAG.getConstant(NumElems, EVT); 2470 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2471 &MaskVec[0], MaskVec.size()); 2472 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2473} 2474 2475/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2476/// 2477static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2478 unsigned NumNonZero, unsigned NumZero, 2479 SelectionDAG &DAG, TargetLowering &TLI) { 2480 if (NumNonZero > 8) 2481 return SDOperand(); 2482 2483 SDOperand V(0, 0); 2484 bool First = true; 2485 for (unsigned i = 0; i < 16; ++i) { 2486 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2487 if (ThisIsNonZero && First) { 2488 if (NumZero) 2489 V = getZeroVector(MVT::v8i16, DAG); 2490 else 2491 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2492 First = false; 2493 } 2494 2495 if ((i & 1) != 0) { 2496 SDOperand ThisElt(0, 0), LastElt(0, 0); 2497 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2498 if (LastIsNonZero) { 2499 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2500 } 2501 if (ThisIsNonZero) { 2502 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2503 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2504 ThisElt, DAG.getConstant(8, MVT::i8)); 2505 if (LastIsNonZero) 2506 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2507 } else 2508 ThisElt = LastElt; 2509 2510 if (ThisElt.Val) 2511 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2512 DAG.getConstant(i/2, TLI.getPointerTy())); 2513 } 2514 } 2515 2516 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2517} 2518 2519/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 
2520/// 2521static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2522 unsigned NumNonZero, unsigned NumZero, 2523 SelectionDAG &DAG, TargetLowering &TLI) { 2524 if (NumNonZero > 4) 2525 return SDOperand(); 2526 2527 SDOperand V(0, 0); 2528 bool First = true; 2529 for (unsigned i = 0; i < 8; ++i) { 2530 bool isNonZero = (NonZeros & (1 << i)) != 0; 2531 if (isNonZero) { 2532 if (First) { 2533 if (NumZero) 2534 V = getZeroVector(MVT::v8i16, DAG); 2535 else 2536 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2537 First = false; 2538 } 2539 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2540 DAG.getConstant(i, TLI.getPointerTy())); 2541 } 2542 } 2543 2544 return V; 2545} 2546 2547SDOperand 2548X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2549 // All zero's are handled with pxor. 2550 if (ISD::isBuildVectorAllZeros(Op.Val)) 2551 return Op; 2552 2553 // All one's are handled with pcmpeqd. 2554 if (ISD::isBuildVectorAllOnes(Op.Val)) 2555 return Op; 2556 2557 MVT::ValueType VT = Op.getValueType(); 2558 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2559 unsigned EVTBits = MVT::getSizeInBits(EVT); 2560 2561 unsigned NumElems = Op.getNumOperands(); 2562 unsigned NumZero = 0; 2563 unsigned NumNonZero = 0; 2564 unsigned NonZeros = 0; 2565 unsigned NumNonZeroImms = 0; 2566 std::set<SDOperand> Values; 2567 for (unsigned i = 0; i < NumElems; ++i) { 2568 SDOperand Elt = Op.getOperand(i); 2569 if (Elt.getOpcode() != ISD::UNDEF) { 2570 Values.insert(Elt); 2571 if (isZeroNode(Elt)) 2572 NumZero++; 2573 else { 2574 NonZeros |= (1 << i); 2575 NumNonZero++; 2576 if (Elt.getOpcode() == ISD::Constant || 2577 Elt.getOpcode() == ISD::ConstantFP) 2578 NumNonZeroImms++; 2579 } 2580 } 2581 } 2582 2583 if (NumNonZero == 0) { 2584 if (NumZero == 0) 2585 // All undef vector. Return an UNDEF. 2586 return DAG.getNode(ISD::UNDEF, VT); 2587 else 2588 // A mix of zero and undef. Return a zero vector. 2589 return getZeroVector(VT, DAG); 2590 } 2591 2592 // Splat is obviously ok. Let legalizer expand it to a shuffle. 2593 if (Values.size() == 1) 2594 return SDOperand(); 2595 2596 // Special case for single non-zero element. 2597 if (NumNonZero == 1) { 2598 unsigned Idx = CountTrailingZeros_32(NonZeros); 2599 SDOperand Item = Op.getOperand(Idx); 2600 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2601 if (Idx == 0) 2602 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 2603 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 2604 NumZero > 0, DAG); 2605 2606 if (EVTBits == 32) { 2607 // Turn it into a shuffle of zero and zero-extended scalar to vector. 2608 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 2609 DAG); 2610 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2611 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 2612 SmallVector<SDOperand, 8> MaskVec; 2613 for (unsigned i = 0; i < NumElems; i++) 2614 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 2615 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2616 &MaskVec[0], MaskVec.size()); 2617 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 2618 DAG.getNode(ISD::UNDEF, VT), Mask); 2619 } 2620 } 2621 2622 // A vector full of immediates; various special cases are already 2623 // handled, so this is best done with a single constant-pool load. 2624 if (NumNonZero == NumNonZeroImms) 2625 return SDOperand(); 2626 2627 // Let legalizer expand 2-wide build_vectors. 
2628 if (EVTBits == 64) 2629 return SDOperand(); 2630 2631 // If element VT is < 32 bits, convert it to inserts into a zero vector. 2632 if (EVTBits == 8 && NumElems == 16) { 2633 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 2634 *this); 2635 if (V.Val) return V; 2636 } 2637 2638 if (EVTBits == 16 && NumElems == 8) { 2639 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 2640 *this); 2641 if (V.Val) return V; 2642 } 2643 2644 // If element VT is == 32 bits, turn it into a number of shuffles. 2645 SmallVector<SDOperand, 8> V; 2646 V.resize(NumElems); 2647 if (NumElems == 4 && NumZero > 0) { 2648 for (unsigned i = 0; i < 4; ++i) { 2649 bool isZero = !(NonZeros & (1 << i)); 2650 if (isZero) 2651 V[i] = getZeroVector(VT, DAG); 2652 else 2653 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 2654 } 2655 2656 for (unsigned i = 0; i < 2; ++i) { 2657 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 2658 default: break; 2659 case 0: 2660 V[i] = V[i*2]; // Must be a zero vector. 2661 break; 2662 case 1: 2663 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 2664 getMOVLMask(NumElems, DAG)); 2665 break; 2666 case 2: 2667 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 2668 getMOVLMask(NumElems, DAG)); 2669 break; 2670 case 3: 2671 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 2672 getUnpacklMask(NumElems, DAG)); 2673 break; 2674 } 2675 } 2676 2677 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 2678 // clears the upper bits. 2679 // FIXME: we can do the same for v4f32 case when we know both parts of 2680 // the lower half come from scalar_to_vector (loadf32). We should do 2681 // that in post legalizer dag combiner with target specific hooks. 2682 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 2683 return V[0]; 2684 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2685 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2686 SmallVector<SDOperand, 8> MaskVec; 2687 bool Reverse = (NonZeros & 0x3) == 2; 2688 for (unsigned i = 0; i < 2; ++i) 2689 if (Reverse) 2690 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 2691 else 2692 MaskVec.push_back(DAG.getConstant(i, EVT)); 2693 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 2694 for (unsigned i = 0; i < 2; ++i) 2695 if (Reverse) 2696 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 2697 else 2698 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 2699 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2700 &MaskVec[0], MaskVec.size()); 2701 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 2702 } 2703 2704 if (Values.size() > 2) { 2705 // Expand into a number of unpckl*. 2706 // e.g. 
for v4f32 2707 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 2708 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 2709 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 2710 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 2711 for (unsigned i = 0; i < NumElems; ++i) 2712 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 2713 NumElems >>= 1; 2714 while (NumElems != 0) { 2715 for (unsigned i = 0; i < NumElems; ++i) 2716 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 2717 UnpckMask); 2718 NumElems >>= 1; 2719 } 2720 return V[0]; 2721 } 2722 2723 return SDOperand(); 2724} 2725 2726SDOperand 2727X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 2728 SDOperand V1 = Op.getOperand(0); 2729 SDOperand V2 = Op.getOperand(1); 2730 SDOperand PermMask = Op.getOperand(2); 2731 MVT::ValueType VT = Op.getValueType(); 2732 unsigned NumElems = PermMask.getNumOperands(); 2733 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 2734 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 2735 bool V1IsSplat = false; 2736 bool V2IsSplat = false; 2737 2738 if (isUndefShuffle(Op.Val)) 2739 return DAG.getNode(ISD::UNDEF, VT); 2740 2741 if (isZeroShuffle(Op.Val)) 2742 return getZeroVector(VT, DAG); 2743 2744 if (isIdentityMask(PermMask.Val)) 2745 return V1; 2746 else if (isIdentityMask(PermMask.Val, true)) 2747 return V2; 2748 2749 if (isSplatMask(PermMask.Val)) { 2750 if (NumElems <= 4) return Op; 2751 // Promote it to a v4i32 splat. 2752 return PromoteSplat(Op, DAG); 2753 } 2754 2755 if (X86::isMOVLMask(PermMask.Val)) 2756 return (V1IsUndef) ? V2 : Op; 2757 2758 if (X86::isMOVSHDUPMask(PermMask.Val) || 2759 X86::isMOVSLDUPMask(PermMask.Val) || 2760 X86::isMOVHLPSMask(PermMask.Val) || 2761 X86::isMOVHPMask(PermMask.Val) || 2762 X86::isMOVLPMask(PermMask.Val)) 2763 return Op; 2764 2765 if (ShouldXformToMOVHLPS(PermMask.Val) || 2766 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 2767 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2768 2769 bool Commuted = false; 2770 V1IsSplat = isSplatVector(V1.Val); 2771 V2IsSplat = isSplatVector(V2.Val); 2772 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 2773 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2774 std::swap(V1IsSplat, V2IsSplat); 2775 std::swap(V1IsUndef, V2IsUndef); 2776 Commuted = true; 2777 } 2778 2779 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 2780 if (V2IsUndef) return V1; 2781 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2782 if (V2IsSplat) { 2783 // V2 is a splat, so the mask may be malformed. That is, it may point 2784 // to any V2 element. The instruction selectior won't like this. Get 2785 // a corrected mask and commute to form a proper MOVS{S|D}. 2786 SDOperand NewMask = getMOVLMask(NumElems, DAG); 2787 if (NewMask.Val != PermMask.Val) 2788 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2789 } 2790 return Op; 2791 } 2792 2793 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 2794 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 2795 X86::isUNPCKLMask(PermMask.Val) || 2796 X86::isUNPCKHMask(PermMask.Val)) 2797 return Op; 2798 2799 if (V2IsSplat) { 2800 // Normalize mask so all entries that point to V2 points to its first 2801 // element then try to match unpck{h|l} again. If match, return a 2802 // new vector_shuffle with the corrected mask. 
2803 SDOperand NewMask = NormalizeMask(PermMask, DAG); 2804 if (NewMask.Val != PermMask.Val) { 2805 if (X86::isUNPCKLMask(PermMask.Val, true)) { 2806 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 2807 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2808 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 2809 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 2810 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2811 } 2812 } 2813 } 2814 2815 // Normalize the node to match x86 shuffle ops if needed 2816 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 2817 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2818 2819 if (Commuted) { 2820 // Commute is back and try unpck* again. 2821 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2822 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 2823 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 2824 X86::isUNPCKLMask(PermMask.Val) || 2825 X86::isUNPCKHMask(PermMask.Val)) 2826 return Op; 2827 } 2828 2829 // If VT is integer, try PSHUF* first, then SHUFP*. 2830 if (MVT::isInteger(VT)) { 2831 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 2832 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 2833 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 2834 X86::isPSHUFDMask(PermMask.Val)) || 2835 X86::isPSHUFHWMask(PermMask.Val) || 2836 X86::isPSHUFLWMask(PermMask.Val)) { 2837 if (V2.getOpcode() != ISD::UNDEF) 2838 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 2839 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 2840 return Op; 2841 } 2842 2843 if (X86::isSHUFPMask(PermMask.Val) && 2844 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 2845 return Op; 2846 2847 // Handle v8i16 shuffle high / low shuffle node pair. 2848 if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) { 2849 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2850 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2851 SmallVector<SDOperand, 8> MaskVec; 2852 for (unsigned i = 0; i != 4; ++i) 2853 MaskVec.push_back(PermMask.getOperand(i)); 2854 for (unsigned i = 4; i != 8; ++i) 2855 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2856 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2857 &MaskVec[0], MaskVec.size()); 2858 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2859 MaskVec.clear(); 2860 for (unsigned i = 0; i != 4; ++i) 2861 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2862 for (unsigned i = 4; i != 8; ++i) 2863 MaskVec.push_back(PermMask.getOperand(i)); 2864 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size()); 2865 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2866 } 2867 } else { 2868 // Floating point cases in the other order. 2869 if (X86::isSHUFPMask(PermMask.Val)) 2870 return Op; 2871 if (X86::isPSHUFDMask(PermMask.Val) || 2872 X86::isPSHUFHWMask(PermMask.Val) || 2873 X86::isPSHUFLWMask(PermMask.Val)) { 2874 if (V2.getOpcode() != ISD::UNDEF) 2875 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 2876 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 2877 return Op; 2878 } 2879 } 2880 2881 if (NumElems == 4 && 2882 // Don't do this for MMX. 
2883 MVT::getSizeInBits(VT) != 64) {
2884 MVT::ValueType MaskVT = PermMask.getValueType();
2885 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
2886 SmallVector<std::pair<int, int>, 8> Locs;
2887 Locs.resize(NumElems);
2888 SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
2889 SmallVector<SDOperand, 8> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
2890 unsigned NumHi = 0;
2891 unsigned NumLo = 0;
2892 // If no more than two elements come from either vector, this can be
2893 // implemented with two shuffles. The first shuffle gathers the elements;
2894 // the second shuffle, which takes the first shuffle as both of its
2895 // vector operands, puts the elements into the right order.
2896 for (unsigned i = 0; i != NumElems; ++i) {
2897 SDOperand Elt = PermMask.getOperand(i);
2898 if (Elt.getOpcode() == ISD::UNDEF) {
2899 Locs[i] = std::make_pair(-1, -1);
2900 } else {
2901 unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
2902 if (Val < NumElems) {
2903 Locs[i] = std::make_pair(0, NumLo);
2904 Mask1[NumLo] = Elt;
2905 NumLo++;
2906 } else {
2907 Locs[i] = std::make_pair(1, NumHi);
2908 if (2+NumHi < NumElems)
2909 Mask1[2+NumHi] = Elt;
2910 NumHi++;
2911 }
2912 }
2913 }
2914 if (NumLo <= 2 && NumHi <= 2) {
2915 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
2916 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2917 &Mask1[0], Mask1.size()));
2918 for (unsigned i = 0; i != NumElems; ++i) {
2919 if (Locs[i].first == -1)
2920 continue;
2921 else {
2922 unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
2923 Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
2924 Mask2[i] = DAG.getConstant(Idx, MaskEVT);
2925 }
2926 }
2927
2928 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
2929 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2930 &Mask2[0], Mask2.size()));
2931 }
2932
2933 // Break it into (shuffle shuffle_hi, shuffle_lo).
2934 Locs.clear(); Locs.resize(NumElems);
2935 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
2936 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
2937 SmallVector<SDOperand,8> *MaskPtr = &LoMask;
2938 unsigned MaskIdx = 0;
2939 unsigned LoIdx = 0;
2940 unsigned HiIdx = NumElems/2;
2941 for (unsigned i = 0; i != NumElems; ++i) {
2942 if (i == NumElems/2) {
2943 MaskPtr = &HiMask;
2944 MaskIdx = 1;
2945 LoIdx = 0;
2946 HiIdx = NumElems/2;
2947 }
2948 SDOperand Elt = PermMask.getOperand(i);
2949 if (Elt.getOpcode() == ISD::UNDEF) {
2950 Locs[i] = std::make_pair(-1, -1);
2951 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
2952 Locs[i] = std::make_pair(MaskIdx, LoIdx);
2953 (*MaskPtr)[LoIdx] = Elt;
2954 LoIdx++;
2955 } else {
2956 Locs[i] = std::make_pair(MaskIdx, HiIdx);
2957 (*MaskPtr)[HiIdx] = Elt;
2958 HiIdx++;
2959 }
2960 }
2961
2962 SDOperand LoShuffle =
2963 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
2964 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2965 &LoMask[0], LoMask.size()));
2966 SDOperand HiShuffle =
2967 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
2968 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2969 &HiMask[0], HiMask.size()));
2970 SmallVector<SDOperand, 8> MaskOps;
2971 for (unsigned i = 0; i != NumElems; ++i) {
2972 if (Locs[i].first == -1) {
2973 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
2974 } else {
2975 unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
2976 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
2977 }
2978 }
2979 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
2980 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2981 &MaskOps[0], MaskOps.size()));
2982 }
2983
2984 return SDOperand();
2985 }
2986
2987 SDOperand
2988 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
2989 if (!isa<ConstantSDNode>(Op.getOperand(1)))
2990 return SDOperand();
2991
2992 MVT::ValueType VT = Op.getValueType();
2993 // TODO: handle v16i8.
2994 if (MVT::getSizeInBits(VT) == 16) {
2995 // Transform it so it matches pextrw which produces a 32-bit result.
2996 MVT::ValueType EVT = (MVT::ValueType)(VT+1);
2997 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
2998 Op.getOperand(0), Op.getOperand(1));
2999 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
3000 DAG.getValueType(VT));
3001 return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3002 } else if (MVT::getSizeInBits(VT) == 32) {
3003 SDOperand Vec = Op.getOperand(0);
3004 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3005 if (Idx == 0)
3006 return Op;
3007 // SHUFPS the element to the lowest double word, then movss.
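// For example, extracting element 2 of a v4f32 becomes a shufps that moves
// element 2 into lane 0 (the other lanes are undef), followed by an
// extract of lane 0.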
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  } else if (MVT::getSizeInBits(VT) == 64) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
  // as its second argument.
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType BaseVT = MVT::getVectorElementType(VT);
  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);
  if (MVT::getSizeInBits(BaseVT) == 16) {
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), getPointerTy());
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  } else if (MVT::getSizeInBits(BaseVT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
    if (Idx == 0) {
      // Use a movss.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      MaskVec.push_back(DAG.getConstant(4, BaseVT));
      for (unsigned i = 1; i <= 3; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size()));
    } else {
      // Use two pinsrw instructions to insert a 32-bit value.
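      // Illustrative example: inserting a GR32 value x at v4i32 index 2 maps
      // to word indices 4 and 5 of the v8i16 view, roughly:
      //   pinsrw $4, %eax, %xmm0   # low 16 bits of x
      //   shrl   $16, %eax
      //   pinsrw $5, %eax, %xmm0   # high 16 bits of x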
      Idx <<= 1;
      if (MVT::isFloatingPoint(N1.getValueType())) {
        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
        N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
        N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
                         DAG.getConstant(0, getPointerTy()));
      }
      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx, getPointerTy()));
      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx+1, getPointerTy()));
      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
    }
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not in the GV offset field.
Platform check is inside GVRequiresExtraLoad() call 3142 // The same applies for external symbols during PIC codegen 3143 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3144 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 3145 3146 return Result; 3147} 3148 3149// Lower ISD::GlobalTLSAddress using the "general dynamic" model 3150static SDOperand 3151LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3152 const MVT::ValueType PtrVT) { 3153 SDOperand InFlag; 3154 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 3155 DAG.getNode(X86ISD::GlobalBaseReg, 3156 PtrVT), InFlag); 3157 InFlag = Chain.getValue(1); 3158 3159 // emit leal symbol@TLSGD(,%ebx,1), %eax 3160 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3161 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3162 GA->getValueType(0), 3163 GA->getOffset()); 3164 SDOperand Ops[] = { Chain, TGA, InFlag }; 3165 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3166 InFlag = Result.getValue(2); 3167 Chain = Result.getValue(1); 3168 3169 // call ___tls_get_addr. This function receives its argument in 3170 // the register EAX. 3171 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3172 InFlag = Chain.getValue(1); 3173 3174 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3175 SDOperand Ops1[] = { Chain, 3176 DAG.getTargetExternalSymbol("___tls_get_addr", 3177 PtrVT), 3178 DAG.getRegister(X86::EAX, PtrVT), 3179 DAG.getRegister(X86::EBX, PtrVT), 3180 InFlag }; 3181 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3182 InFlag = Chain.getValue(1); 3183 3184 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3185} 3186 3187// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 3188// "local exec" model. 3189static SDOperand 3190LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3191 const MVT::ValueType PtrVT) { 3192 // Get the Thread Pointer 3193 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 3194 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 3195 // exec) 3196 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3197 GA->getValueType(0), 3198 GA->getOffset()); 3199 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 3200 3201 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 3202 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0); 3203 3204 // The address of the thread local variable is the add of the thread 3205 // pointer with the offset of the variable. 
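  // Roughly (an illustrative sketch, not taken from this file), the local
  // exec case ends up as:
  //   movl %gs:0, %eax            # thread pointer
  //   leal x@ntpoff(%eax), %eax   # x's offset relative to it
  // while initial exec first loads the offset itself from memory.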
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}

SDOperand
X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for PIC executables
  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF and 64-bit targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS model,
  // otherwise use the "Local Exec" TLS model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
  else
    return LowerToTLSExecModel(GA, DAG, getPointerTy());
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
    DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3273 } 3274 3275 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3276 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3277 DAG.getConstant(32, MVT::i8)); 3278 SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)}; 3279 SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1); 3280 3281 SDOperand Hi, Lo; 3282 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3283 3284 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3285 SmallVector<SDOperand, 4> Ops; 3286 if (Op.getOpcode() == ISD::SHL_PARTS) { 3287 Ops.push_back(Tmp2); 3288 Ops.push_back(Tmp3); 3289 Ops.push_back(CC); 3290 Ops.push_back(InFlag); 3291 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3292 InFlag = Hi.getValue(1); 3293 3294 Ops.clear(); 3295 Ops.push_back(Tmp3); 3296 Ops.push_back(Tmp1); 3297 Ops.push_back(CC); 3298 Ops.push_back(InFlag); 3299 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3300 } else { 3301 Ops.push_back(Tmp2); 3302 Ops.push_back(Tmp3); 3303 Ops.push_back(CC); 3304 Ops.push_back(InFlag); 3305 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3306 InFlag = Lo.getValue(1); 3307 3308 Ops.clear(); 3309 Ops.push_back(Tmp3); 3310 Ops.push_back(Tmp1); 3311 Ops.push_back(CC); 3312 Ops.push_back(InFlag); 3313 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3314 } 3315 3316 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3317 Ops.clear(); 3318 Ops.push_back(Lo); 3319 Ops.push_back(Hi); 3320 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3321} 3322 3323SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3324 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3325 Op.getOperand(0).getValueType() >= MVT::i16 && 3326 "Unknown SINT_TO_FP to lower!"); 3327 3328 SDOperand Result; 3329 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3330 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3331 MachineFunction &MF = DAG.getMachineFunction(); 3332 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3333 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3334 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3335 StackSlot, NULL, 0); 3336 3337 // These are really Legal; caller falls through into that case. 3338 if (SrcVT==MVT::i32 && Op.getValueType() != MVT::f80 && X86ScalarSSE) 3339 return Result; 3340 if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 && 3341 Subtarget->is64Bit()) 3342 return Result; 3343 3344 // Build the FILD 3345 SDVTList Tys; 3346 bool useSSE = X86ScalarSSE && Op.getValueType() != MVT::f80; 3347 if (useSSE) 3348 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 3349 else 3350 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 3351 SmallVector<SDOperand, 8> Ops; 3352 Ops.push_back(Chain); 3353 Ops.push_back(StackSlot); 3354 Ops.push_back(DAG.getValueType(SrcVT)); 3355 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 3356 Tys, &Ops[0], Ops.size()); 3357 3358 if (useSSE) { 3359 Chain = Result.getValue(1); 3360 SDOperand InFlag = Result.getValue(2); 3361 3362 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 3363 // shouldn't be necessary except that RFP cannot be live across 3364 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
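    // Illustrative i32 -> f64 sequence in the SSE case (assumed shape):
    //   movl  %eax, (slot0)     # store the integer
    //   fildl (slot0)           # x87 integer load (the FILD_FLAG above)
    //   fstpl (slot1)           # spill as f64 (the FST built below)
    //   movsd (slot1), %xmm0    # reload into an SSE register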
3365 MachineFunction &MF = DAG.getMachineFunction(); 3366 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 3367 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3368 Tys = DAG.getVTList(MVT::Other); 3369 SmallVector<SDOperand, 8> Ops; 3370 Ops.push_back(Chain); 3371 Ops.push_back(Result); 3372 Ops.push_back(StackSlot); 3373 Ops.push_back(DAG.getValueType(Op.getValueType())); 3374 Ops.push_back(InFlag); 3375 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 3376 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 3377 } 3378 3379 return Result; 3380} 3381 3382SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 3383 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 3384 "Unknown FP_TO_SINT to lower!"); 3385 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 3386 // stack slot. 3387 SDOperand Result; 3388 MachineFunction &MF = DAG.getMachineFunction(); 3389 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 3390 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3391 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3392 3393 // These are really Legal. 3394 if (Op.getValueType() == MVT::i32 && X86ScalarSSE && 3395 Op.getOperand(0).getValueType() != MVT::f80) 3396 return Result; 3397 if (Subtarget->is64Bit() && 3398 Op.getValueType() == MVT::i64 && 3399 Op.getOperand(0).getValueType() != MVT::f80) 3400 return Result; 3401 3402 unsigned Opc; 3403 switch (Op.getValueType()) { 3404 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 3405 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 3406 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 3407 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 3408 } 3409 3410 SDOperand Chain = DAG.getEntryNode(); 3411 SDOperand Value = Op.getOperand(0); 3412 if (X86ScalarSSE && Op.getOperand(0).getValueType() != MVT::f80) { 3413 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 3414 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 3415 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 3416 SDOperand Ops[] = { 3417 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 3418 }; 3419 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 3420 Chain = Value.getValue(1); 3421 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3422 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3423 } 3424 3425 // Build the FP_TO_INT*_IN_MEM 3426 SDOperand Ops[] = { Chain, Value, StackSlot }; 3427 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 3428 3429 // Load the result. 
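  // e.g. (illustrative) an f64 -> i64 conversion becomes roughly:
  //   fldl    (value)          # operand on the x87 stack
  //   fistpll (slot)           # FP_TO_INT64_IN_MEM, truncating store
  //   movl    (slot), %eax     # plus movl 4(slot), %edx for the high half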
3430 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 3431} 3432 3433SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 3434 MVT::ValueType VT = Op.getValueType(); 3435 MVT::ValueType EltVT = VT; 3436 if (MVT::isVector(VT)) 3437 EltVT = MVT::getVectorElementType(VT); 3438 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 3439 std::vector<Constant*> CV; 3440 if (EltVT == MVT::f64) { 3441 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 3442 CV.push_back(C); 3443 CV.push_back(C); 3444 } else { 3445 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 3446 CV.push_back(C); 3447 CV.push_back(C); 3448 CV.push_back(C); 3449 CV.push_back(C); 3450 } 3451 Constant *C = ConstantVector::get(CV); 3452 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 3453 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 3454 false, 16); 3455 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 3456} 3457 3458SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 3459 MVT::ValueType VT = Op.getValueType(); 3460 MVT::ValueType EltVT = VT; 3461 unsigned EltNum = 1; 3462 if (MVT::isVector(VT)) { 3463 EltVT = MVT::getVectorElementType(VT); 3464 EltNum = MVT::getVectorNumElements(VT); 3465 } 3466 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 3467 std::vector<Constant*> CV; 3468 if (EltVT == MVT::f64) { 3469 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 3470 CV.push_back(C); 3471 CV.push_back(C); 3472 } else { 3473 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 3474 CV.push_back(C); 3475 CV.push_back(C); 3476 CV.push_back(C); 3477 CV.push_back(C); 3478 } 3479 Constant *C = ConstantVector::get(CV); 3480 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 3481 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 3482 false, 16); 3483 if (MVT::isVector(VT)) { 3484 return DAG.getNode(ISD::BIT_CONVERT, VT, 3485 DAG.getNode(ISD::XOR, MVT::v2i64, 3486 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 3487 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 3488 } else { 3489 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 3490 } 3491} 3492 3493SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 3494 SDOperand Op0 = Op.getOperand(0); 3495 SDOperand Op1 = Op.getOperand(1); 3496 MVT::ValueType VT = Op.getValueType(); 3497 MVT::ValueType SrcVT = Op1.getValueType(); 3498 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 3499 3500 // If second operand is smaller, extend it first. 3501 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 3502 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 3503 SrcVT = VT; 3504 SrcTy = MVT::getTypeForValueType(SrcVT); 3505 } 3506 3507 // First get the sign bit of second operand. 
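  // For reference, the identity being materialized here is
  //   copysign(x, y) = (x & ~sign_mask) | (y & sign_mask)
  // with both masks loaded from the constant pool as 128-bit vectors so they
  // can feed the packed-AND instructions (ANDPS/ANDPD under SSE).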
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0,
                                false, 16);
  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift the sign bit right if the second operand is wider than the result
  // (a smaller Op1 was already extended above, so that is the only mismatch).
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getConstant(0, getPointerTy()));
  }

  // Clear the sign bit of the first operand.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
                                false, 16);
  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}

SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
                                        SDOperand Chain) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  unsigned X86CC;

  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    SDOperand Ops1[] = { Chain, Op0, Op1 };
    Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1);
    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
    return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
  }

  assert(isFP && "Illegal integer SetCC!");

  SDOperand COps[] = { Chain, Op0, Op1 };
  Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1);

  switch (SetCCOpcode) {
  default: assert(false && "Illegal floating point SetCC!");
  case ISD::SETOEQ: {  // !PF & ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8),
                         Tmp1.getValue(1) };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
  }
  case ISD::SETUNE: {  // PF | !ZF
    SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond };
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
    SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8),
                         Tmp1.getValue(1) };
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
  }
  }
}

SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = DAG.getEntryNode();
  SDOperand Cond = Op.getOperand(0);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition-
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register-
    // pressure reasons)?
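    // Illustrative shape of the rewrite performed below (not verbatim from
    // the source):
    //   (select (x86_setcc cc, (x86_cmp a, b)), t, f)
    //     -> (x86_cmov t, f, cc, (x86_cmp a, b)')   // fresh copy of the CMP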
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    bool IllegalFPCMov = !X86ScalarSSE &&
      MVT::isFloatingPoint(Op.getValueType()) &&
      !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
        !IllegalFPCMov) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }

  VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond.getValue(1));
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond = Op.getOperand(1);
  SDOperand Dest = Op.getOperand(2);
  SDOperand CC;
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG, Chain);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
    // (since the flag operand cannot be shared). Use it as the condition-
    // setting operand in place of the X86ISD::SETCC.
    // If the X86ISD::SETCC has more than one use, then perhaps it's better
    // to use a test instead of duplicating the X86ISD::CMP (for register-
    // pressure reasons)?
    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Cond, Op.getOperand(2), CC, Cond.getValue(1));
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit())
    return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
  else
    switch (CallingConv) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      // TODO: Implement fastcc
      // Falls through
    case CallingConv::C:
    case CallingConv::X86_StdCall:
      return LowerCCCCallTo(Op, DAG, CallingConv);
    case CallingConv::X86_FastCall:
      return LowerFastCCCallTo(Op, DAG, CallingConv);
    }
}


// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go.
Touching the stack at 4K increments is necessary to ensure 3714// that the guard pages used by the OS virtual memory manager are allocated in 3715// correct sequence. 3716SDOperand 3717X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 3718 SelectionDAG &DAG) { 3719 assert(Subtarget->isTargetCygMing() && 3720 "This should be used only on Cygwin/Mingw targets"); 3721 3722 // Get the inputs. 3723 SDOperand Chain = Op.getOperand(0); 3724 SDOperand Size = Op.getOperand(1); 3725 // FIXME: Ensure alignment here 3726 3727 SDOperand Flag; 3728 3729 MVT::ValueType IntPtr = getPointerTy(); 3730 MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 3731 3732 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 3733 Flag = Chain.getValue(1); 3734 3735 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3736 SDOperand Ops[] = { Chain, 3737 DAG.getTargetExternalSymbol("_alloca", IntPtr), 3738 DAG.getRegister(X86::EAX, IntPtr), 3739 Flag }; 3740 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 3741 Flag = Chain.getValue(1); 3742 3743 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 3744 3745 std::vector<MVT::ValueType> Tys; 3746 Tys.push_back(SPTy); 3747 Tys.push_back(MVT::Other); 3748 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 3749 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 3750} 3751 3752SDOperand 3753X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 3754 MachineFunction &MF = DAG.getMachineFunction(); 3755 const Function* Fn = MF.getFunction(); 3756 if (Fn->hasExternalLinkage() && 3757 Subtarget->isTargetCygMing() && 3758 Fn->getName() == "main") 3759 MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true); 3760 3761 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3762 if (Subtarget->is64Bit()) 3763 return LowerX86_64CCCArguments(Op, DAG); 3764 else 3765 switch(CC) { 3766 default: 3767 assert(0 && "Unsupported calling convention"); 3768 case CallingConv::Fast: 3769 // TODO: implement fastcc. 3770 3771 // Falls through 3772 case CallingConv::C: 3773 return LowerCCCArguments(Op, DAG); 3774 case CallingConv::X86_StdCall: 3775 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall); 3776 return LowerCCCArguments(Op, DAG, true); 3777 case CallingConv::X86_FastCall: 3778 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall); 3779 return LowerFastCCArguments(Op, DAG); 3780 } 3781} 3782 3783SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 3784 SDOperand InFlag(0, 0); 3785 SDOperand Chain = Op.getOperand(0); 3786 unsigned Align = 3787 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 3788 if (Align == 0) Align = 1; 3789 3790 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 3791 // If not DWORD aligned or size is more than the threshold, call memset. 3792 // The libc version is likely to be faster for these cases. It can use the 3793 // address value and run time information about the CPU. 3794 if ((Align & 3) != 0 || 3795 (I && I->getValue() > Subtarget->getMinRepStrSizeThreshold())) { 3796 MVT::ValueType IntPtr = getPointerTy(); 3797 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 3798 TargetLowering::ArgListTy Args; 3799 TargetLowering::ArgListEntry Entry; 3800 Entry.Node = Op.getOperand(1); 3801 Entry.Ty = IntPtrTy; 3802 Args.push_back(Entry); 3803 // Extend the unsigned i8 argument to be an int value for the call. 
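    // (The zero-extension matches the libc prototype,
    //    void *memset(void *s, int c, size_t n);
    //  the value parameter is an int even though only its low byte is used.)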
3804 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 3805 Entry.Ty = IntPtrTy; 3806 Args.push_back(Entry); 3807 Entry.Node = Op.getOperand(3); 3808 Args.push_back(Entry); 3809 std::pair<SDOperand,SDOperand> CallResult = 3810 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 3811 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 3812 return CallResult.second; 3813 } 3814 3815 MVT::ValueType AVT; 3816 SDOperand Count; 3817 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3818 unsigned BytesLeft = 0; 3819 bool TwoRepStos = false; 3820 if (ValC) { 3821 unsigned ValReg; 3822 uint64_t Val = ValC->getValue() & 255; 3823 3824 // If the value is a constant, then we can potentially use larger sets. 3825 switch (Align & 3) { 3826 case 2: // WORD aligned 3827 AVT = MVT::i16; 3828 ValReg = X86::AX; 3829 Val = (Val << 8) | Val; 3830 break; 3831 case 0: // DWORD aligned 3832 AVT = MVT::i32; 3833 ValReg = X86::EAX; 3834 Val = (Val << 8) | Val; 3835 Val = (Val << 16) | Val; 3836 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 3837 AVT = MVT::i64; 3838 ValReg = X86::RAX; 3839 Val = (Val << 32) | Val; 3840 } 3841 break; 3842 default: // Byte aligned 3843 AVT = MVT::i8; 3844 ValReg = X86::AL; 3845 Count = Op.getOperand(3); 3846 break; 3847 } 3848 3849 if (AVT > MVT::i8) { 3850 if (I) { 3851 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 3852 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 3853 BytesLeft = I->getValue() % UBytes; 3854 } else { 3855 assert(AVT >= MVT::i32 && 3856 "Do not use rep;stos if not at least DWORD aligned"); 3857 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 3858 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 3859 TwoRepStos = true; 3860 } 3861 } 3862 3863 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 3864 InFlag); 3865 InFlag = Chain.getValue(1); 3866 } else { 3867 AVT = MVT::i8; 3868 Count = Op.getOperand(3); 3869 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 3870 InFlag = Chain.getValue(1); 3871 } 3872 3873 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 3874 Count, InFlag); 3875 InFlag = Chain.getValue(1); 3876 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 3877 Op.getOperand(1), InFlag); 3878 InFlag = Chain.getValue(1); 3879 3880 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3881 SmallVector<SDOperand, 8> Ops; 3882 Ops.push_back(Chain); 3883 Ops.push_back(DAG.getValueType(AVT)); 3884 Ops.push_back(InFlag); 3885 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 3886 3887 if (TwoRepStos) { 3888 InFlag = Chain.getValue(1); 3889 Count = Op.getOperand(3); 3890 MVT::ValueType CVT = Count.getValueType(); 3891 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 3892 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 3893 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 3894 Left, InFlag); 3895 InFlag = Chain.getValue(1); 3896 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3897 Ops.clear(); 3898 Ops.push_back(Chain); 3899 Ops.push_back(DAG.getValueType(MVT::i8)); 3900 Ops.push_back(InFlag); 3901 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 3902 } else if (BytesLeft) { 3903 // Issue stores for the last 1 - 7 bytes. 
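    // Worked example (illustrative): a DWORD-aligned 13-byte memset of a
    // constant byte uses rep;stosl with ECX = 3 (12 bytes), leaving
    // BytesLeft = 1, which the code below turns into one i8 store at
    // offset 12.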
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 4) {
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      Value = DAG.getConstant(Val, MVT::i32);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned or size is more than the threshold, call memcpy.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
  // With glibc 2.6.1 on a Core 2, copying an array of 100M longs was 30% faster
  if ((Align & 3) != 0 ||
      (I && I->getValue() > Subtarget->getMinRepStrSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Ty = getTargetData()->getIntPtrType();
    Entry.Node = Op.getOperand(1); Args.push_back(Entry);
    Entry.Node = Op.getOperand(2); Args.push_back(Entry);
    Entry.Node = Op.getOperand(3); Args.push_back(Entry);
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  unsigned BytesLeft = 0;
  bool TwoRepMovs = false;
  switch (Align & 3) {
  case 2:   // WORD aligned
    AVT = MVT::i16;
    break;
  case 0:   // DWORD aligned
    AVT = MVT::i32;
    if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
      AVT = MVT::i64;
    break;
  default:  // Byte aligned
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    break;
  }

  if (AVT > MVT::i8) {
    if (I) {
      unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
      Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
      BytesLeft = I->getValue() % UBytes;
    } else {
      assert(AVT >= MVT::i32 &&
             "Do not use rep;movs if not at least DWORD aligned");
      Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                          Op.getOperand(3), DAG.getConstant(2, MVT::i8));
      TwoRepMovs = true;
    }
  }

  SDOperand InFlag(0, 0);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ?
X86::RDI : X86::EDI, 4005 Op.getOperand(1), InFlag); 4006 InFlag = Chain.getValue(1); 4007 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4008 Op.getOperand(2), InFlag); 4009 InFlag = Chain.getValue(1); 4010 4011 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4012 SmallVector<SDOperand, 8> Ops; 4013 Ops.push_back(Chain); 4014 Ops.push_back(DAG.getValueType(AVT)); 4015 Ops.push_back(InFlag); 4016 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4017 4018 if (TwoRepMovs) { 4019 InFlag = Chain.getValue(1); 4020 Count = Op.getOperand(3); 4021 MVT::ValueType CVT = Count.getValueType(); 4022 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4023 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4024 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4025 Left, InFlag); 4026 InFlag = Chain.getValue(1); 4027 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4028 Ops.clear(); 4029 Ops.push_back(Chain); 4030 Ops.push_back(DAG.getValueType(MVT::i8)); 4031 Ops.push_back(InFlag); 4032 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4033 } else if (BytesLeft) { 4034 // Issue loads and stores for the last 1 - 7 bytes. 4035 unsigned Offset = I->getValue() - BytesLeft; 4036 SDOperand DstAddr = Op.getOperand(1); 4037 MVT::ValueType DstVT = DstAddr.getValueType(); 4038 SDOperand SrcAddr = Op.getOperand(2); 4039 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4040 SDOperand Value; 4041 if (BytesLeft >= 4) { 4042 Value = DAG.getLoad(MVT::i32, Chain, 4043 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4044 DAG.getConstant(Offset, SrcVT)), 4045 NULL, 0); 4046 Chain = Value.getValue(1); 4047 Chain = DAG.getStore(Chain, Value, 4048 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4049 DAG.getConstant(Offset, DstVT)), 4050 NULL, 0); 4051 BytesLeft -= 4; 4052 Offset += 4; 4053 } 4054 if (BytesLeft >= 2) { 4055 Value = DAG.getLoad(MVT::i16, Chain, 4056 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4057 DAG.getConstant(Offset, SrcVT)), 4058 NULL, 0); 4059 Chain = Value.getValue(1); 4060 Chain = DAG.getStore(Chain, Value, 4061 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4062 DAG.getConstant(Offset, DstVT)), 4063 NULL, 0); 4064 BytesLeft -= 2; 4065 Offset += 2; 4066 } 4067 4068 if (BytesLeft == 1) { 4069 Value = DAG.getLoad(MVT::i8, Chain, 4070 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4071 DAG.getConstant(Offset, SrcVT)), 4072 NULL, 0); 4073 Chain = Value.getValue(1); 4074 Chain = DAG.getStore(Chain, Value, 4075 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4076 DAG.getConstant(Offset, DstVT)), 4077 NULL, 0); 4078 } 4079 } 4080 4081 return Chain; 4082} 4083 4084SDOperand 4085X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) { 4086 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4087 SDOperand TheOp = Op.getOperand(0); 4088 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1); 4089 if (Subtarget->is64Bit()) { 4090 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4091 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX, 4092 MVT::i64, Copy1.getValue(2)); 4093 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2, 4094 DAG.getConstant(32, MVT::i8)); 4095 SDOperand Ops[] = { 4096 DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp), Copy2.getValue(1) 4097 }; 4098 4099 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4100 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); 4101 } 4102 4103 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4104 SDOperand Copy2 = 
    DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX,
                       MVT::i32, Copy1.getValue(2));
  SDOperand Ops[] = { Copy1, Copy2, Copy2.getValue(1) };
  Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 3);
}

SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                        SV->getOffset());
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters passed in memory)
  //   reg_save_area
  SmallVector<SDOperand, 8> MemOps;
  SDOperand FIN = Op.getOperand(1);
  // Store gp_offset
  SDOperand Store = DAG.getStore(Op.getOperand(0),
                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
                                 FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  Store = DAG.getStore(Op.getOperand(0),
                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
                       FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(8, getPointerTy()));
  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}

SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
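  // Field layout assumed here (System V x86-64 ABI), 24 bytes in total:
  //   offset 0:  i32 gp_offset          offset 4:  i32 fp_offset
  //   offset 8:  i8* overflow_arg_area  offset 16: i8* reg_save_area
  // The loop below copies all of it as three i64 load/store pairs.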
4163 SDOperand Chain = Op.getOperand(0); 4164 SDOperand DstPtr = Op.getOperand(1); 4165 SDOperand SrcPtr = Op.getOperand(2); 4166 SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3)); 4167 SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4168 4169 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, 4170 SrcSV->getValue(), SrcSV->getOffset()); 4171 Chain = SrcPtr.getValue(1); 4172 for (unsigned i = 0; i < 3; ++i) { 4173 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, 4174 SrcSV->getValue(), SrcSV->getOffset()); 4175 Chain = Val.getValue(1); 4176 Chain = DAG.getStore(Chain, Val, DstPtr, 4177 DstSV->getValue(), DstSV->getOffset()); 4178 if (i == 2) 4179 break; 4180 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4181 DAG.getConstant(8, getPointerTy())); 4182 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4183 DAG.getConstant(8, getPointerTy())); 4184 } 4185 return Chain; 4186} 4187 4188SDOperand 4189X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4190 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4191 switch (IntNo) { 4192 default: return SDOperand(); // Don't custom lower most intrinsics. 4193 // Comparison intrinsics. 4194 case Intrinsic::x86_sse_comieq_ss: 4195 case Intrinsic::x86_sse_comilt_ss: 4196 case Intrinsic::x86_sse_comile_ss: 4197 case Intrinsic::x86_sse_comigt_ss: 4198 case Intrinsic::x86_sse_comige_ss: 4199 case Intrinsic::x86_sse_comineq_ss: 4200 case Intrinsic::x86_sse_ucomieq_ss: 4201 case Intrinsic::x86_sse_ucomilt_ss: 4202 case Intrinsic::x86_sse_ucomile_ss: 4203 case Intrinsic::x86_sse_ucomigt_ss: 4204 case Intrinsic::x86_sse_ucomige_ss: 4205 case Intrinsic::x86_sse_ucomineq_ss: 4206 case Intrinsic::x86_sse2_comieq_sd: 4207 case Intrinsic::x86_sse2_comilt_sd: 4208 case Intrinsic::x86_sse2_comile_sd: 4209 case Intrinsic::x86_sse2_comigt_sd: 4210 case Intrinsic::x86_sse2_comige_sd: 4211 case Intrinsic::x86_sse2_comineq_sd: 4212 case Intrinsic::x86_sse2_ucomieq_sd: 4213 case Intrinsic::x86_sse2_ucomilt_sd: 4214 case Intrinsic::x86_sse2_ucomile_sd: 4215 case Intrinsic::x86_sse2_ucomigt_sd: 4216 case Intrinsic::x86_sse2_ucomige_sd: 4217 case Intrinsic::x86_sse2_ucomineq_sd: { 4218 unsigned Opc = 0; 4219 ISD::CondCode CC = ISD::SETCC_INVALID; 4220 switch (IntNo) { 4221 default: break; 4222 case Intrinsic::x86_sse_comieq_ss: 4223 case Intrinsic::x86_sse2_comieq_sd: 4224 Opc = X86ISD::COMI; 4225 CC = ISD::SETEQ; 4226 break; 4227 case Intrinsic::x86_sse_comilt_ss: 4228 case Intrinsic::x86_sse2_comilt_sd: 4229 Opc = X86ISD::COMI; 4230 CC = ISD::SETLT; 4231 break; 4232 case Intrinsic::x86_sse_comile_ss: 4233 case Intrinsic::x86_sse2_comile_sd: 4234 Opc = X86ISD::COMI; 4235 CC = ISD::SETLE; 4236 break; 4237 case Intrinsic::x86_sse_comigt_ss: 4238 case Intrinsic::x86_sse2_comigt_sd: 4239 Opc = X86ISD::COMI; 4240 CC = ISD::SETGT; 4241 break; 4242 case Intrinsic::x86_sse_comige_ss: 4243 case Intrinsic::x86_sse2_comige_sd: 4244 Opc = X86ISD::COMI; 4245 CC = ISD::SETGE; 4246 break; 4247 case Intrinsic::x86_sse_comineq_ss: 4248 case Intrinsic::x86_sse2_comineq_sd: 4249 Opc = X86ISD::COMI; 4250 CC = ISD::SETNE; 4251 break; 4252 case Intrinsic::x86_sse_ucomieq_ss: 4253 case Intrinsic::x86_sse2_ucomieq_sd: 4254 Opc = X86ISD::UCOMI; 4255 CC = ISD::SETEQ; 4256 break; 4257 case Intrinsic::x86_sse_ucomilt_ss: 4258 case Intrinsic::x86_sse2_ucomilt_sd: 4259 Opc = X86ISD::UCOMI; 4260 CC = ISD::SETLT; 4261 break; 4262 case Intrinsic::x86_sse_ucomile_ss: 4263 case Intrinsic::x86_sse2_ucomile_sd: 4264 Opc = 
X86ISD::UCOMI; 4265 CC = ISD::SETLE; 4266 break; 4267 case Intrinsic::x86_sse_ucomigt_ss: 4268 case Intrinsic::x86_sse2_ucomigt_sd: 4269 Opc = X86ISD::UCOMI; 4270 CC = ISD::SETGT; 4271 break; 4272 case Intrinsic::x86_sse_ucomige_ss: 4273 case Intrinsic::x86_sse2_ucomige_sd: 4274 Opc = X86ISD::UCOMI; 4275 CC = ISD::SETGE; 4276 break; 4277 case Intrinsic::x86_sse_ucomineq_ss: 4278 case Intrinsic::x86_sse2_ucomineq_sd: 4279 Opc = X86ISD::UCOMI; 4280 CC = ISD::SETNE; 4281 break; 4282 } 4283 4284 unsigned X86CC; 4285 SDOperand LHS = Op.getOperand(1); 4286 SDOperand RHS = Op.getOperand(2); 4287 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4288 4289 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4290 SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS }; 4291 SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3); 4292 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 4293 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 4294 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2); 4295 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4296 } 4297 } 4298} 4299 4300SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4301 // Depths > 0 not supported yet! 4302 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4303 return SDOperand(); 4304 4305 // Just load the return address 4306 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4307 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4308} 4309 4310SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4311 // Depths > 0 not supported yet! 4312 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4313 return SDOperand(); 4314 4315 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4316 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4317 DAG.getConstant(4, getPointerTy())); 4318} 4319 4320SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 4321 SelectionDAG &DAG) { 4322 // Is not yet supported on x86-64 4323 if (Subtarget->is64Bit()) 4324 return SDOperand(); 4325 4326 return DAG.getConstant(8, getPointerTy()); 4327} 4328 4329SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 4330{ 4331 assert(!Subtarget->is64Bit() && 4332 "Lowering of eh_return builtin is not supported yet on x86-64"); 4333 4334 MachineFunction &MF = DAG.getMachineFunction(); 4335 SDOperand Chain = Op.getOperand(0); 4336 SDOperand Offset = Op.getOperand(1); 4337 SDOperand Handler = Op.getOperand(2); 4338 4339 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 4340 getPointerTy()); 4341 4342 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 4343 DAG.getConstant(-4UL, getPointerTy())); 4344 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 4345 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 4346 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 4347 MF.addLiveOut(X86::ECX); 4348 4349 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 4350 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 4351} 4352 4353SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 4354 SelectionDAG &DAG) { 4355 SDOperand Root = Op.getOperand(0); 4356 SDOperand Trmp = Op.getOperand(1); // trampoline 4357 SDOperand FPtr = Op.getOperand(2); // nested function 4358 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 4359 4360 SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4361 4362 if (Subtarget->is64Bit()) { 4363 return 
SDOperand(); // not yet supported 4364 } else { 4365 Function *Func = (Function *) 4366 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 4367 unsigned CC = Func->getCallingConv(); 4368 unsigned NestReg; 4369 4370 switch (CC) { 4371 default: 4372 assert(0 && "Unsupported calling convention"); 4373 case CallingConv::C: 4374 case CallingConv::Fast: 4375 case CallingConv::X86_StdCall: { 4376 // Pass 'nest' parameter in ECX. 4377 // Must be kept in sync with X86CallingConv.td 4378 NestReg = X86::ECX; 4379 4380 // Check that ECX wasn't needed by an 'inreg' parameter. 4381 const FunctionType *FTy = Func->getFunctionType(); 4382 const ParamAttrsList *Attrs = FTy->getParamAttrs(); 4383 4384 if (Attrs && !Func->isVarArg()) { 4385 unsigned InRegCount = 0; 4386 unsigned Idx = 1; 4387 4388 for (FunctionType::param_iterator I = FTy->param_begin(), 4389 E = FTy->param_end(); I != E; ++I, ++Idx) 4390 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 4391 // FIXME: should only count parameters that are lowered to integers. 4392 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 4393 4394 if (InRegCount > 2) { 4395 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 4396 abort(); 4397 } 4398 } 4399 break; 4400 } 4401 case CallingConv::X86_FastCall: 4402 // Pass 'nest' parameter in EAX. 4403 // Must be kept in sync with X86CallingConv.td 4404 NestReg = X86::EAX; 4405 break; 4406 } 4407 4408 const X86InstrInfo *TII = 4409 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 4410 4411 SDOperand OutChains[4]; 4412 SDOperand Addr, Disp; 4413 4414 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 4415 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 4416 4417 unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 4418 unsigned char N86Reg = ((X86RegisterInfo&)RegInfo).getX86RegNum(NestReg); 4419 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 4420 Trmp, TrmpSV->getValue(), TrmpSV->getOffset()); 4421 4422 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 4423 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(), 4424 TrmpSV->getOffset() + 1, false, 1); 4425 4426 unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 4427 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 4428 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 4429 TrmpSV->getValue() + 5, TrmpSV->getOffset()); 4430 4431 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 4432 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(), 4433 TrmpSV->getOffset() + 6, false, 1); 4434 4435 SDOperand Ops[] = 4436 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 4437 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 4438 } 4439} 4440 4441/// LowerOperation - Provide custom lowering hooks for some operations. 
4442/// 4443SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4444 switch (Op.getOpcode()) { 4445 default: assert(0 && "Should not custom lower this!"); 4446 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4447 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4448 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4449 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4450 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4451 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4452 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4453 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4454 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4455 case ISD::SHL_PARTS: 4456 case ISD::SRA_PARTS: 4457 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4458 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4459 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4460 case ISD::FABS: return LowerFABS(Op, DAG); 4461 case ISD::FNEG: return LowerFNEG(Op, DAG); 4462 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4463 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode()); 4464 case ISD::SELECT: return LowerSELECT(Op, DAG); 4465 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4466 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4467 case ISD::CALL: return LowerCALL(Op, DAG); 4468 case ISD::RET: return LowerRET(Op, DAG); 4469 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4470 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4471 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4472 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4473 case ISD::VASTART: return LowerVASTART(Op, DAG); 4474 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 4475 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4476 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4477 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4478 case ISD::FRAME_TO_ARGS_OFFSET: 4479 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 4480 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 4481 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 4482 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 4483 } 4484 return SDOperand(); 4485} 4486 4487const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4488 switch (Opcode) { 4489 default: return NULL; 4490 case X86ISD::SHLD: return "X86ISD::SHLD"; 4491 case X86ISD::SHRD: return "X86ISD::SHRD"; 4492 case X86ISD::FAND: return "X86ISD::FAND"; 4493 case X86ISD::FOR: return "X86ISD::FOR"; 4494 case X86ISD::FXOR: return "X86ISD::FXOR"; 4495 case X86ISD::FSRL: return "X86ISD::FSRL"; 4496 case X86ISD::FILD: return "X86ISD::FILD"; 4497 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 4498 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 4499 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 4500 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 4501 case X86ISD::FLD: return "X86ISD::FLD"; 4502 case X86ISD::FST: return "X86ISD::FST"; 4503 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 4504 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 4505 case X86ISD::CALL: return "X86ISD::CALL"; 4506 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 4507 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 4508 case X86ISD::CMP: return "X86ISD::CMP"; 4509 case 
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FSRL:               return "X86ISD::FSRL";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::THREAD_POINTER:     return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
    return false;

  if (AM.BaseGV) {
    // We can only fold this if we don't need an extra load.
    if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
      return false;

    // X86-64 only supports addr of globals in small code model.
    if (Subtarget->is64Bit()) {
      if (getTargetMachine().getCodeModel() != CodeModel::Small)
        return false;
      // If lower 4G is not available, then we must use rip-relative
      // addressing.
      if (AM.BaseOffs || AM.Scale > 1)
        return false;
    }
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}
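// For example, an access of the form "GV + 4*i + 20" matches the general
// x86 address [BaseReg + Scale*IndexReg + Disp] directly.  Scale==3/5/9 is
// accepted only when no base register is in use because the index register
// itself must occupy the base slot, e.g.:
//   leal (%eax,%eax,2), %ecx      # ecx = 3*eax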
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() <= 4 ||
          isIdentityMask(Mask.Val) ||
          isIdentityMask(Mask.Val, true) ||
          isSplatMask(Mask.Val) ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKH_v_undef_Mask(Mask.Val));
}

bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
                                               MVT::ValueType EVT,
                                               SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(&BVOps[0], 4) ||
            isCommutedMOVL(&BVOps[0], 4, true) ||
            isSHUFPMask(&BVOps[0], 4) ||
            isCommutedSHUFP(&BVOps[0], 4));
  }
  return false;
}
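// Illustrative examples for v8i16: the identity mask <0,1,2,3,4,5,6,7>, the
// splat mask <0,0,0,0,0,0,0,0> and a pshuflw-style mask such as
// <0,2,1,3,4,5,6,7> are all accepted above, and any mask with at most four
// elements is accepted unconditionally; 64-bit (MMX) vector shuffles are
// always rejected.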
//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the control word...
    unsigned OldCW =
      F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);

    // Store a control word that rounds toward zero (0xC7F sets the rounding
    // control field, bits 10-11, to 11b = truncate)...
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of the control word to the original value.
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    // N.B. Op is a reference into MI; the assignments below copy later
    // operands into its slot, which is harmless since MI is deleted below.
    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getFrameIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}
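// For the FP*_TO_INT*_IN_MEM cases above, the emitted code is roughly (shown
// here for a 32-bit result, with <slot> standing for the control-word stack
// slot and %ax for the virtual GR16 scratch register):
//   fnstcw  <slot>           # save the current control word
//   movw    <slot>, %ax      # remember it
//   movw    $0xC7F, <slot>   # RC field (bits 10-11) = 11b: round toward zero
//   fldcw   <slot>           # activate truncating rounding
//   movw    %ax, <slot>      # restore the memory image of the old word
//   fistpl  (%ecx)           # store the truncated integer result
//   fldcw   <slot>           # reload the original control word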
//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = 0;   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    // SETCC produces 0 or 1, so every bit above bit 0 is known zero.
    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0)
      : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(),
                               DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}
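// E.g. for (add (add (X86ISD::Wrapper (globaladdr @G)), 8), 16) the
// recursion above returns GA = @G and adds 24 to Offset.  Note that an
// offset already folded into the GlobalAddressSDNode itself is not
// accumulated here; only explicit ADD-by-constant operands contribute.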
/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT)/8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  LoadSDNode *LD = cast<LoadSDNode>(Base);
  if (isAlign16) {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  } else {
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile(),
                       LD->getAlignment());
  }
}
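// E.g. a v4f32 shuffle <0,1,2,3> whose elements are four consecutive
// non-extending f32 loads from p, p+4, p+8 and p+12 becomes a single v4f32
// load from p.  If the base is known to be 16-byte aligned the load can be
// selected as movaps; otherwise it keeps the original smaller alignment and
// is typically selected as movups.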
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE[12] support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE:  // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT:  // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT:  // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}
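// E.g. the pattern
//   %c = fcmp olt float %x, %y
//   %r = select %c, float %x, float %y
// becomes (X86ISD::FMIN %x, %y) and selects to a single minss.  The
// non-strict comparisons (ole/ule/le and their max counterparts) are only
// combined under -enable-unsafe-fp-math, since minss/maxss do not give the
// IEEE-correct answer for NaN operands or for -0.0 vs. +0.0.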
SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE:
    return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:
    return PerformSELECTCombine(N, DAG, Subtarget);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the commuted form, (C+GA).
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we
      // can't match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}
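// Typical uses of the constraints handled above, in GCC inline-asm syntax:
//   asm("shll %1, %0" : "+r"(x) : "I"(5));         // 'I': 0..31 shift count
//   asm("outb %0, %1" : : "a"(val), "Nd"(port));   // 'N': 0..255 immediate
//   asm("" : : "i"(&a_global));                    // 'i': link-time constant
// 'A' ties a value to the EAX/EDX pair (e.g. for rdtsc-style results), and
// 'q'/'Q' restrict the operand to registers that have 8-bit subregisters.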
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.
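  // E.g. for
  //   asm("rdtsc" : "={ax}"(lo), "={dx}"(hi));
  // with i32 operands, the generic lookup above returns {AX, GR16}; the
  // remapping below rewrites this to {EAX, GR32} (and likewise DX to EDX).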
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}