X86ISelLowering.cpp revision 14b32e194152f7147d6f531346b923152e58900f
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by Chris Lattner and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#include "X86.h" 16#include "X86InstrBuilder.h" 17#include "X86ISelLowering.h" 18#include "X86MachineFunctionInfo.h" 19#include "X86TargetMachine.h" 20#include "llvm/CallingConv.h" 21#include "llvm/Constants.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/GlobalVariable.h" 24#include "llvm/Function.h" 25#include "llvm/Intrinsics.h" 26#include "llvm/ADT/BitVector.h" 27#include "llvm/ADT/VectorExtras.h" 28#include "llvm/Analysis/ScalarEvolutionExpressions.h" 29#include "llvm/CodeGen/CallingConvLower.h" 30#include "llvm/CodeGen/MachineFrameInfo.h" 31#include "llvm/CodeGen/MachineFunction.h" 32#include "llvm/CodeGen/MachineInstrBuilder.h" 33#include "llvm/CodeGen/SelectionDAG.h" 34#include "llvm/CodeGen/SSARegMap.h" 35#include "llvm/Support/MathExtras.h" 36#include "llvm/Support/CommandLine.h" 37#include "llvm/Support/Debug.h" 38#include "llvm/Target/TargetOptions.h" 39#include "llvm/ADT/SmallSet.h" 40#include "llvm/ADT/StringExtras.h" 41#include "llvm/ParameterAttributes.h" 42using namespace llvm; 43 44X86TargetLowering::X86TargetLowering(TargetMachine &TM) 45 : TargetLowering(TM) { 46 Subtarget = &TM.getSubtarget<X86Subtarget>(); 47 X86ScalarSSEf64 = Subtarget->hasSSE2(); 48 X86ScalarSSEf32 = Subtarget->hasSSE1(); 49 X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; 50 51 52 RegInfo = TM.getRegisterInfo(); 53 54 // Set up the TargetLowering object. 55 56 // X86 is weird, it always uses i8 for shift amounts and setcc results. 57 setShiftAmountType(MVT::i8); 58 setSetCCResultType(MVT::i8); 59 setSetCCResultContents(ZeroOrOneSetCCResult); 60 setSchedulingPreference(SchedulingForRegPressure); 61 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0 62 setStackPointerRegisterToSaveRestore(X86StackPtr); 63 64 if (Subtarget->isTargetDarwin()) { 65 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 66 setUseUnderscoreSetJmp(false); 67 setUseUnderscoreLongJmp(false); 68 } else if (Subtarget->isTargetMingw()) { 69 // MS runtime is weird: it exports _setjmp, but longjmp! 70 setUseUnderscoreSetJmp(true); 71 setUseUnderscoreLongJmp(false); 72 } else { 73 setUseUnderscoreSetJmp(true); 74 setUseUnderscoreLongJmp(true); 75 } 76 77 // Set up the register classes. 78 addRegisterClass(MVT::i8, X86::GR8RegisterClass); 79 addRegisterClass(MVT::i16, X86::GR16RegisterClass); 80 addRegisterClass(MVT::i32, X86::GR32RegisterClass); 81 if (Subtarget->is64Bit()) 82 addRegisterClass(MVT::i64, X86::GR64RegisterClass); 83 84 setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand); 85 86 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 87 // operation. 88 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 89 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 90 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 91 92 if (Subtarget->is64Bit()) { 93 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand); 94 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 95 } else { 96 if (X86ScalarSSEf64) 97 // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP. 98 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand); 99 else 100 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 101 } 102 103 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 104 // this operation. 105 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 106 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 107 // SSE has no i16 to fp conversion, only i32 108 if (X86ScalarSSEf32) { 109 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 110 // f32 and f64 cases are Legal, f80 case is not 111 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 112 } else { 113 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 114 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 115 } 116 117 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 118 // are Legal, f80 is custom lowered. 119 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 120 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 121 122 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 123 // this operation. 124 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 125 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 126 127 if (X86ScalarSSEf32) { 128 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 129 // f32 and f64 cases are Legal, f80 case is not 130 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 131 } else { 132 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 133 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 134 } 135 136 // Handle FP_TO_UINT by promoting the destination to a larger signed 137 // conversion. 138 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 139 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 140 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 141 142 if (Subtarget->is64Bit()) { 143 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 144 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 145 } else { 146 if (X86ScalarSSEf32 && !Subtarget->hasSSE3()) 147 // Expand FP_TO_UINT into a select. 148 // FIXME: We would like to use a Custom expander here eventually to do 149 // the optimal thing for SSE vs. the default expansion in the legalizer. 150 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 151 else 152 // With SSE3 we can use fisttpll to convert to a signed i64. 153 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 154 } 155 156 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 157 if (!X86ScalarSSEf64) { 158 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand); 159 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand); 160 } 161 162 // Scalar integer multiply, multiply-high, divide, and remainder are 163 // lowered to use operations that produce two results, to match the 164 // available instructions. This exposes the two-result form to trivial 165 // CSE, which is able to combine x/y and x%y into a single instruction, 166 // for example. The single-result multiply instructions are introduced 167 // in X86ISelDAGToDAG.cpp, after CSE, for uses where the the high part 168 // is not needed. 169 setOperationAction(ISD::MUL , MVT::i8 , Expand); 170 setOperationAction(ISD::MULHS , MVT::i8 , Expand); 171 setOperationAction(ISD::MULHU , MVT::i8 , Expand); 172 setOperationAction(ISD::SDIV , MVT::i8 , Expand); 173 setOperationAction(ISD::UDIV , MVT::i8 , Expand); 174 setOperationAction(ISD::SREM , MVT::i8 , Expand); 175 setOperationAction(ISD::UREM , MVT::i8 , Expand); 176 setOperationAction(ISD::MUL , MVT::i16 , Expand); 177 setOperationAction(ISD::MULHS , MVT::i16 , Expand); 178 setOperationAction(ISD::MULHU , MVT::i16 , Expand); 179 setOperationAction(ISD::SDIV , MVT::i16 , Expand); 180 setOperationAction(ISD::UDIV , MVT::i16 , Expand); 181 setOperationAction(ISD::SREM , MVT::i16 , Expand); 182 setOperationAction(ISD::UREM , MVT::i16 , Expand); 183 setOperationAction(ISD::MUL , MVT::i32 , Expand); 184 setOperationAction(ISD::MULHS , MVT::i32 , Expand); 185 setOperationAction(ISD::MULHU , MVT::i32 , Expand); 186 setOperationAction(ISD::SDIV , MVT::i32 , Expand); 187 setOperationAction(ISD::UDIV , MVT::i32 , Expand); 188 setOperationAction(ISD::SREM , MVT::i32 , Expand); 189 setOperationAction(ISD::UREM , MVT::i32 , Expand); 190 setOperationAction(ISD::MUL , MVT::i64 , Expand); 191 setOperationAction(ISD::MULHS , MVT::i64 , Expand); 192 setOperationAction(ISD::MULHU , MVT::i64 , Expand); 193 setOperationAction(ISD::SDIV , MVT::i64 , Expand); 194 setOperationAction(ISD::UDIV , MVT::i64 , Expand); 195 setOperationAction(ISD::SREM , MVT::i64 , Expand); 196 setOperationAction(ISD::UREM , MVT::i64 , Expand); 197 198 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 199 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 200 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 201 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 202 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand); 203 if (Subtarget->is64Bit()) 204 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 205 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 206 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 207 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 208 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 209 setOperationAction(ISD::FREM , MVT::f64 , Expand); 210 setOperationAction(ISD::FLT_ROUNDS , MVT::i32 , Custom); 211 212 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 213 setOperationAction(ISD::CTTZ , MVT::i8 , Expand); 214 setOperationAction(ISD::CTLZ , MVT::i8 , Expand); 215 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 216 setOperationAction(ISD::CTTZ , MVT::i16 , Expand); 217 setOperationAction(ISD::CTLZ , MVT::i16 , Expand); 218 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 219 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 220 setOperationAction(ISD::CTLZ , MVT::i32 , Expand); 221 if (Subtarget->is64Bit()) { 222 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 223 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 224 setOperationAction(ISD::CTLZ , MVT::i64 , Expand); 225 } 226 227 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 228 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 229 230 // These should be promoted to a larger select which is supported. 231 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 232 setOperationAction(ISD::SELECT , MVT::i8 , Promote); 233 // X86 wants to expand cmov itself. 234 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 235 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 236 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 237 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 238 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 239 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 240 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 241 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 242 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 243 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 244 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 245 if (Subtarget->is64Bit()) { 246 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 247 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 248 } 249 // X86 ret instruction may pop stack. 250 setOperationAction(ISD::RET , MVT::Other, Custom); 251 if (!Subtarget->is64Bit()) 252 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 253 254 // Darwin ABI issue. 255 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 256 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 257 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 258 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 259 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 260 if (Subtarget->is64Bit()) { 261 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 262 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 263 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 264 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 265 } 266 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 267 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 268 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 269 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 270 // X86 wants to expand memset / memcpy itself. 271 setOperationAction(ISD::MEMSET , MVT::Other, Custom); 272 setOperationAction(ISD::MEMCPY , MVT::Other, Custom); 273 274 // Use the default ISD::LOCATION expansion. 275 setOperationAction(ISD::LOCATION, MVT::Other, Expand); 276 // FIXME - use subtarget debug flags 277 if (!Subtarget->isTargetDarwin() && 278 !Subtarget->isTargetELF() && 279 !Subtarget->isTargetCygMing()) 280 setOperationAction(ISD::LABEL, MVT::Other, Expand); 281 282 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 283 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 284 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 285 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 286 if (Subtarget->is64Bit()) { 287 // FIXME: Verify 288 setExceptionPointerRegister(X86::RAX); 289 setExceptionSelectorRegister(X86::RDX); 290 } else { 291 setExceptionPointerRegister(X86::EAX); 292 setExceptionSelectorRegister(X86::EDX); 293 } 294 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 295 296 setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom); 297 298 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 299 setOperationAction(ISD::VASTART , MVT::Other, Custom); 300 setOperationAction(ISD::VAARG , MVT::Other, Expand); 301 setOperationAction(ISD::VAEND , MVT::Other, Expand); 302 if (Subtarget->is64Bit()) 303 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 304 else 305 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 306 307 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 308 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 309 if (Subtarget->is64Bit()) 310 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); 311 if (Subtarget->isTargetCygMing()) 312 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 313 else 314 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 315 316 if (X86ScalarSSEf64) { 317 // f32 and f64 use SSE. 318 // Set up the FP register classes. 319 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 320 addRegisterClass(MVT::f64, X86::FR64RegisterClass); 321 322 // Use ANDPD to simulate FABS. 323 setOperationAction(ISD::FABS , MVT::f64, Custom); 324 setOperationAction(ISD::FABS , MVT::f32, Custom); 325 326 // Use XORP to simulate FNEG. 327 setOperationAction(ISD::FNEG , MVT::f64, Custom); 328 setOperationAction(ISD::FNEG , MVT::f32, Custom); 329 330 // Use ANDPD and ORPD to simulate FCOPYSIGN. 331 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 332 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 333 334 // We don't support sin/cos/fmod 335 setOperationAction(ISD::FSIN , MVT::f64, Expand); 336 setOperationAction(ISD::FCOS , MVT::f64, Expand); 337 setOperationAction(ISD::FREM , MVT::f64, Expand); 338 setOperationAction(ISD::FSIN , MVT::f32, Expand); 339 setOperationAction(ISD::FCOS , MVT::f32, Expand); 340 setOperationAction(ISD::FREM , MVT::f32, Expand); 341 342 // Expand FP immediates into loads from the stack, except for the special 343 // cases we handle. 344 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 345 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 346 addLegalFPImmediate(APFloat(+0.0)); // xorpd 347 addLegalFPImmediate(APFloat(+0.0f)); // xorps 348 349 // Conversions to long double (in X87) go through memory. 350 setConvertAction(MVT::f32, MVT::f80, Expand); 351 setConvertAction(MVT::f64, MVT::f80, Expand); 352 353 // Conversions from long double (in X87) go through memory. 354 setConvertAction(MVT::f80, MVT::f32, Expand); 355 setConvertAction(MVT::f80, MVT::f64, Expand); 356 } else if (X86ScalarSSEf32) { 357 // Use SSE for f32, x87 for f64. 358 // Set up the FP register classes. 359 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 360 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 361 362 // Use ANDPS to simulate FABS. 363 setOperationAction(ISD::FABS , MVT::f32, Custom); 364 365 // Use XORP to simulate FNEG. 366 setOperationAction(ISD::FNEG , MVT::f32, Custom); 367 368 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 369 370 // Use ANDPS and ORPS to simulate FCOPYSIGN. 371 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 372 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 373 374 // We don't support sin/cos/fmod 375 setOperationAction(ISD::FSIN , MVT::f32, Expand); 376 setOperationAction(ISD::FCOS , MVT::f32, Expand); 377 setOperationAction(ISD::FREM , MVT::f32, Expand); 378 379 // Expand FP immediates into loads from the stack, except for the special 380 // cases we handle. 381 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 382 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 383 addLegalFPImmediate(APFloat(+0.0f)); // xorps 384 addLegalFPImmediate(APFloat(+0.0)); // FLD0 385 addLegalFPImmediate(APFloat(+1.0)); // FLD1 386 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 387 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 388 389 // SSE->x87 conversions go through memory. 390 setConvertAction(MVT::f32, MVT::f64, Expand); 391 setConvertAction(MVT::f32, MVT::f80, Expand); 392 393 // x87->SSE truncations need to go through memory. 394 setConvertAction(MVT::f80, MVT::f32, Expand); 395 setConvertAction(MVT::f64, MVT::f32, Expand); 396 // And x87->x87 truncations also. 397 setConvertAction(MVT::f80, MVT::f64, Expand); 398 399 if (!UnsafeFPMath) { 400 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 401 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 402 } 403 } else { 404 // f32 and f64 in x87. 405 // Set up the FP register classes. 406 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 407 addRegisterClass(MVT::f32, X86::RFP32RegisterClass); 408 409 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 410 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 411 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 412 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 413 414 // Floating truncations need to go through memory. 415 setConvertAction(MVT::f80, MVT::f32, Expand); 416 setConvertAction(MVT::f64, MVT::f32, Expand); 417 setConvertAction(MVT::f80, MVT::f64, Expand); 418 419 if (!UnsafeFPMath) { 420 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 421 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 422 } 423 424 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 425 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 426 addLegalFPImmediate(APFloat(+0.0)); // FLD0 427 addLegalFPImmediate(APFloat(+1.0)); // FLD1 428 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 429 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 430 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 431 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 432 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 433 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 434 } 435 436 // Long double always uses X87. 437 addRegisterClass(MVT::f80, X86::RFP80RegisterClass); 438 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 439 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 440 setOperationAction(ISD::ConstantFP, MVT::f80, Expand); 441 if (!UnsafeFPMath) { 442 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 443 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 444 } 445 446 // Always use a library call for pow. 447 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 448 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 449 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 450 451 // First set operation action for all vector types to expand. Then we 452 // will selectively turn on ones that can be effectively codegen'd. 453 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 454 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 455 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand); 456 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand); 457 setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand); 458 setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand); 459 setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand); 460 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); 461 setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand); 462 setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); 463 setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); 464 setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); 465 setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); 466 setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); 467 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand); 468 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand); 469 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 470 setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 471 setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand); 472 setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand); 473 setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand); 474 setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand); 475 setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand); 476 setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand); 477 setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand); 478 setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand); 479 setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand); 480 setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand); 481 setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand); 482 setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand); 483 setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand); 484 setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand); 485 setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand); 486 } 487 488 if (Subtarget->hasMMX()) { 489 addRegisterClass(MVT::v8i8, X86::VR64RegisterClass); 490 addRegisterClass(MVT::v4i16, X86::VR64RegisterClass); 491 addRegisterClass(MVT::v2i32, X86::VR64RegisterClass); 492 addRegisterClass(MVT::v1i64, X86::VR64RegisterClass); 493 494 // FIXME: add MMX packed arithmetics 495 496 setOperationAction(ISD::ADD, MVT::v8i8, Legal); 497 setOperationAction(ISD::ADD, MVT::v4i16, Legal); 498 setOperationAction(ISD::ADD, MVT::v2i32, Legal); 499 setOperationAction(ISD::ADD, MVT::v1i64, Legal); 500 501 setOperationAction(ISD::SUB, MVT::v8i8, Legal); 502 setOperationAction(ISD::SUB, MVT::v4i16, Legal); 503 setOperationAction(ISD::SUB, MVT::v2i32, Legal); 504 setOperationAction(ISD::SUB, MVT::v1i64, Legal); 505 506 setOperationAction(ISD::MULHS, MVT::v4i16, Legal); 507 setOperationAction(ISD::MUL, MVT::v4i16, Legal); 508 509 setOperationAction(ISD::AND, MVT::v8i8, Promote); 510 AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64); 511 setOperationAction(ISD::AND, MVT::v4i16, Promote); 512 AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64); 513 setOperationAction(ISD::AND, MVT::v2i32, Promote); 514 AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64); 515 setOperationAction(ISD::AND, MVT::v1i64, Legal); 516 517 setOperationAction(ISD::OR, MVT::v8i8, Promote); 518 AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64); 519 setOperationAction(ISD::OR, MVT::v4i16, Promote); 520 AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64); 521 setOperationAction(ISD::OR, MVT::v2i32, Promote); 522 AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64); 523 setOperationAction(ISD::OR, MVT::v1i64, Legal); 524 525 setOperationAction(ISD::XOR, MVT::v8i8, Promote); 526 AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64); 527 setOperationAction(ISD::XOR, MVT::v4i16, Promote); 528 AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64); 529 setOperationAction(ISD::XOR, MVT::v2i32, Promote); 530 AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64); 531 setOperationAction(ISD::XOR, MVT::v1i64, Legal); 532 533 setOperationAction(ISD::LOAD, MVT::v8i8, Promote); 534 AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64); 535 setOperationAction(ISD::LOAD, MVT::v4i16, Promote); 536 AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64); 537 setOperationAction(ISD::LOAD, MVT::v2i32, Promote); 538 AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64); 539 setOperationAction(ISD::LOAD, MVT::v1i64, Legal); 540 541 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom); 542 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); 543 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom); 544 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom); 545 546 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); 547 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); 548 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom); 549 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom); 550 551 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom); 552 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom); 553 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom); 554 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom); 555 } 556 557 if (Subtarget->hasSSE1()) { 558 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); 559 560 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 561 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 562 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 563 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 564 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 565 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 566 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 567 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 568 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 569 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 570 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 571 } 572 573 if (Subtarget->hasSSE2()) { 574 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); 575 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); 576 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); 577 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); 578 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); 579 580 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 581 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 582 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 583 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 584 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 585 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 586 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 587 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 588 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 589 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 590 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 591 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 592 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 593 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 594 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 595 596 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 597 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 598 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 599 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 600 // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones. 601 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 602 603 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 604 for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { 605 // Do not attempt to custom lower non-power-of-2 vectors 606 if (!isPowerOf2_32(MVT::getVectorNumElements(VT))) 607 continue; 608 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom); 609 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom); 610 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom); 611 } 612 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 613 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 614 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 615 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 616 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 617 if (Subtarget->is64Bit()) 618 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 619 620 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 621 for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) { 622 setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote); 623 AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64); 624 setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote); 625 AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64); 626 setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote); 627 AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64); 628 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote); 629 AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64); 630 setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); 631 AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64); 632 } 633 634 // Custom lower v2i64 and v2f64 selects. 635 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 636 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 637 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 638 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 639 } 640 641 // We want to custom lower some of our intrinsics. 642 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 643 644 // We have target-specific dag combine patterns for the following nodes: 645 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 646 setTargetDAGCombine(ISD::SELECT); 647 648 computeRegisterProperties(); 649 650 // FIXME: These should be based on subtarget info. Plus, the values should 651 // be smaller when we are in optimizing for size mode. 652 maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores 653 maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores 654 maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores 655 allowUnalignedMemoryAccesses = true; // x86 supports it! 656} 657 658 659/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 660/// jumptable. 661SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table, 662 SelectionDAG &DAG) const { 663 if (usesGlobalOffsetTable()) 664 return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy()); 665 if (!Subtarget->isPICStyleRIPRel()) 666 return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()); 667 return Table; 668} 669 670//===----------------------------------------------------------------------===// 671// Return Value Calling Convention Implementation 672//===----------------------------------------------------------------------===// 673 674#include "X86GenCallingConv.inc" 675 676/// GetPossiblePreceedingTailCall - Get preceeding X86ISD::TAILCALL node if it 677/// exists skip possible ISD:TokenFactor. 678static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) { 679 if (Chain.getOpcode()==X86ISD::TAILCALL) { 680 return Chain; 681 } else if (Chain.getOpcode()==ISD::TokenFactor) { 682 if (Chain.getNumOperands() && 683 Chain.getOperand(0).getOpcode()==X86ISD::TAILCALL) 684 return Chain.getOperand(0); 685 } 686 return Chain; 687} 688 689/// LowerRET - Lower an ISD::RET node. 690SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { 691 assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args"); 692 693 SmallVector<CCValAssign, 16> RVLocs; 694 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); 695 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 696 CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs); 697 CCInfo.AnalyzeReturn(Op.Val, RetCC_X86); 698 699 // If this is the first return lowered for this function, add the regs to the 700 // liveout set for the function. 701 if (DAG.getMachineFunction().liveout_empty()) { 702 for (unsigned i = 0; i != RVLocs.size(); ++i) 703 if (RVLocs[i].isRegLoc()) 704 DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg()); 705 } 706 SDOperand Chain = Op.getOperand(0); 707 708 // Handle tail call return. 709 Chain = GetPossiblePreceedingTailCall(Chain); 710 if (Chain.getOpcode() == X86ISD::TAILCALL) { 711 SDOperand TailCall = Chain; 712 SDOperand TargetAddress = TailCall.getOperand(1); 713 SDOperand StackAdjustment = TailCall.getOperand(2); 714 assert ( ((TargetAddress.getOpcode() == ISD::Register && 715 (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX || 716 cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) || 717 TargetAddress.getOpcode() == ISD::TargetExternalSymbol || 718 TargetAddress.getOpcode() == ISD::TargetGlobalAddress) && 719 "Expecting an global address, external symbol, or register"); 720 assert( StackAdjustment.getOpcode() == ISD::Constant && 721 "Expecting a const value"); 722 723 SmallVector<SDOperand,8> Operands; 724 Operands.push_back(Chain.getOperand(0)); 725 Operands.push_back(TargetAddress); 726 Operands.push_back(StackAdjustment); 727 // Copy registers used by the call. Last operand is a flag so it is not 728 // copied. 729 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) { 730 Operands.push_back(Chain.getOperand(i)); 731 } 732 return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0], 733 Operands.size()); 734 } 735 736 // Regular return. 737 SDOperand Flag; 738 739 // Copy the result values into the output registers. 740 if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() || 741 RVLocs[0].getLocReg() != X86::ST0) { 742 for (unsigned i = 0; i != RVLocs.size(); ++i) { 743 CCValAssign &VA = RVLocs[i]; 744 assert(VA.isRegLoc() && "Can only return in registers!"); 745 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), 746 Flag); 747 Flag = Chain.getValue(1); 748 } 749 } else { 750 // We need to handle a destination of ST0 specially, because it isn't really 751 // a register. 752 SDOperand Value = Op.getOperand(1); 753 754 // If this is an FP return with ScalarSSE, we need to move the value from 755 // an XMM register onto the fp-stack. 756 if ((X86ScalarSSEf32 && RVLocs[0].getValVT()==MVT::f32) || 757 (X86ScalarSSEf64 && RVLocs[0].getValVT()==MVT::f64)) { 758 SDOperand MemLoc; 759 760 // If this is a load into a scalarsse value, don't store the loaded value 761 // back to the stack, only to reload it: just replace the scalar-sse load. 762 if (ISD::isNON_EXTLoad(Value.Val) && 763 (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) { 764 Chain = Value.getOperand(0); 765 MemLoc = Value.getOperand(1); 766 } else { 767 // Spill the value to memory and reload it into top of stack. 768 unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8; 769 MachineFunction &MF = DAG.getMachineFunction(); 770 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 771 MemLoc = DAG.getFrameIndex(SSFI, getPointerTy()); 772 Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0); 773 } 774 SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other); 775 SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())}; 776 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 777 Chain = Value.getValue(1); 778 } 779 780 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 781 SDOperand Ops[] = { Chain, Value }; 782 Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2); 783 Flag = Chain.getValue(1); 784 } 785 786 SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16); 787 if (Flag.Val) 788 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag); 789 else 790 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop); 791} 792 793 794/// LowerCallResult - Lower the result values of an ISD::CALL into the 795/// appropriate copies out of appropriate physical registers. This assumes that 796/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call 797/// being lowered. The returns a SDNode with the same number of values as the 798/// ISD::CALL. 799SDNode *X86TargetLowering:: 800LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, 801 unsigned CallingConv, SelectionDAG &DAG) { 802 803 // Assign locations to each value returned by this call. 804 SmallVector<CCValAssign, 16> RVLocs; 805 bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0; 806 CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs); 807 CCInfo.AnalyzeCallResult(TheCall, RetCC_X86); 808 809 810 SmallVector<SDOperand, 8> ResultVals; 811 812 // Copy all of the result registers out of their specified physreg. 813 if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) { 814 for (unsigned i = 0; i != RVLocs.size(); ++i) { 815 Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(), 816 RVLocs[i].getValVT(), InFlag).getValue(1); 817 InFlag = Chain.getValue(2); 818 ResultVals.push_back(Chain.getValue(0)); 819 } 820 } else { 821 // Copies from the FP stack are special, as ST0 isn't a valid register 822 // before the fp stackifier runs. 823 824 // Copy ST0 into an RFP register with FP_GET_RESULT. 825 SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag); 826 SDOperand GROps[] = { Chain, InFlag }; 827 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2); 828 Chain = RetVal.getValue(1); 829 InFlag = RetVal.getValue(2); 830 831 // If we are using ScalarSSE, store ST(0) to the stack and reload it into 832 // an XMM register. 833 if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) || 834 (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) { 835 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This 836 // shouldn't be necessary except that RFP cannot be live across 837 // multiple blocks. When stackifier is fixed, they can be uncoupled. 838 MachineFunction &MF = DAG.getMachineFunction(); 839 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 840 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 841 SDOperand Ops[] = { 842 Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag 843 }; 844 Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5); 845 RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0); 846 Chain = RetVal.getValue(1); 847 } 848 ResultVals.push_back(RetVal); 849 } 850 851 // Merge everything together with a MERGE_VALUES node. 852 ResultVals.push_back(Chain); 853 return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(), 854 &ResultVals[0], ResultVals.size()).Val; 855} 856 857 858//===----------------------------------------------------------------------===// 859// C & StdCall & Fast Calling Convention implementation 860//===----------------------------------------------------------------------===// 861// StdCall calling convention seems to be standard for many Windows' API 862// routines and around. It differs from C calling convention just a little: 863// callee should clean up the stack, not caller. Symbols should be also 864// decorated in some fancy way :) It doesn't support any vector arguments. 865// For info on fast calling convention see Fast Calling Convention (tail call) 866// implementation LowerX86_32FastCCCallTo. 867 868/// AddLiveIn - This helper function adds the specified physical register to the 869/// MachineFunction as a live in value. It also creates a corresponding virtual 870/// register for it. 871static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg, 872 const TargetRegisterClass *RC) { 873 assert(RC->contains(PReg) && "Not the correct regclass!"); 874 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC); 875 MF.addLiveIn(PReg, VReg); 876 return VReg; 877} 878 879// align stack arguments according to platform alignment needed for tail calls 880unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG); 881 882SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG, 883 const CCValAssign &VA, 884 MachineFrameInfo *MFI, 885 SDOperand Root, unsigned i) { 886 // Create the nodes corresponding to a load from this parameter slot. 887 int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8, 888 VA.getLocMemOffset()); 889 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); 890 891 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue(); 892 893 if (Flags & ISD::ParamFlags::ByVal) 894 return FIN; 895 else 896 return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0); 897} 898 899SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG, 900 bool isStdCall) { 901 unsigned NumArgs = Op.Val->getNumValues() - 1; 902 MachineFunction &MF = DAG.getMachineFunction(); 903 MachineFrameInfo *MFI = MF.getFrameInfo(); 904 SDOperand Root = Op.getOperand(0); 905 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 906 unsigned CC = MF.getFunction()->getCallingConv(); 907 // Assign locations to all of the incoming arguments. 908 SmallVector<CCValAssign, 16> ArgLocs; 909 CCState CCInfo(CC, isVarArg, 910 getTargetMachine(), ArgLocs); 911 // Check for possible tail call calling convention. 912 if (CC == CallingConv::Fast && PerformTailCallOpt) 913 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_TailCall); 914 else 915 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C); 916 917 SmallVector<SDOperand, 8> ArgValues; 918 unsigned LastVal = ~0U; 919 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 920 CCValAssign &VA = ArgLocs[i]; 921 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 922 // places. 923 assert(VA.getValNo() != LastVal && 924 "Don't support value assigned to multiple locs yet"); 925 LastVal = VA.getValNo(); 926 927 if (VA.isRegLoc()) { 928 MVT::ValueType RegVT = VA.getLocVT(); 929 TargetRegisterClass *RC; 930 if (RegVT == MVT::i32) 931 RC = X86::GR32RegisterClass; 932 else { 933 assert(MVT::isVector(RegVT)); 934 RC = X86::VR128RegisterClass; 935 } 936 937 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); 938 SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); 939 940 // If this is an 8 or 16-bit value, it is really passed promoted to 32 941 // bits. Insert an assert[sz]ext to capture this, then truncate to the 942 // right size. 943 if (VA.getLocInfo() == CCValAssign::SExt) 944 ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue, 945 DAG.getValueType(VA.getValVT())); 946 else if (VA.getLocInfo() == CCValAssign::ZExt) 947 ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue, 948 DAG.getValueType(VA.getValVT())); 949 950 if (VA.getLocInfo() != CCValAssign::Full) 951 ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue); 952 953 ArgValues.push_back(ArgValue); 954 } else { 955 assert(VA.isMemLoc()); 956 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i)); 957 } 958 } 959 960 unsigned StackSize = CCInfo.getNextStackOffset(); 961 // align stack specially for tail calls 962 if (CC==CallingConv::Fast) 963 StackSize = GetAlignedArgumentStackSize(StackSize,DAG); 964 965 ArgValues.push_back(Root); 966 967 // If the function takes variable number of arguments, make a frame index for 968 // the start of the first vararg value... for expansion of llvm.va_start. 969 if (isVarArg) 970 VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize); 971 972 // Tail call calling convention (CallingConv::Fast) does not support varargs. 973 assert( !(isVarArg && CC == CallingConv::Fast) && 974 "CallingConv::Fast does not support varargs."); 975 976 if (isStdCall && !isVarArg && 977 (CC==CallingConv::Fast && PerformTailCallOpt || CC!=CallingConv::Fast)) { 978 BytesToPopOnReturn = StackSize; // Callee pops everything.. 979 BytesCallerReserves = 0; 980 } else { 981 BytesToPopOnReturn = 0; // Callee pops nothing. 982 983 // If this is an sret function, the return should pop the hidden pointer. 984 if (NumArgs && 985 (cast<ConstantSDNode>(Op.getOperand(3))->getValue() & 986 ISD::ParamFlags::StructReturn)) 987 BytesToPopOnReturn = 4; 988 989 BytesCallerReserves = StackSize; 990 } 991 992 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 993 994 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 995 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); 996 997 // Return the new list of results. 998 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 999 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo); 1000} 1001 1002SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, 1003 unsigned CC) { 1004 SDOperand Chain = Op.getOperand(0); 1005 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1006 SDOperand Callee = Op.getOperand(4); 1007 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1008 1009 // Analyze operands of the call, assigning locations to each operand. 1010 SmallVector<CCValAssign, 16> ArgLocs; 1011 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1012 if(CC==CallingConv::Fast && PerformTailCallOpt) 1013 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall); 1014 else 1015 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C); 1016 1017 // Get a count of how many bytes are to be pushed on the stack. 1018 unsigned NumBytes = CCInfo.getNextStackOffset(); 1019 if (CC==CallingConv::Fast) 1020 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 1021 1022 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1023 1024 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1025 SmallVector<SDOperand, 8> MemOpChains; 1026 1027 SDOperand StackPtr; 1028 1029 // Walk the register/memloc assignments, inserting copies/loads. 1030 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1031 CCValAssign &VA = ArgLocs[i]; 1032 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1033 1034 // Promote the value if needed. 1035 switch (VA.getLocInfo()) { 1036 default: assert(0 && "Unknown loc info!"); 1037 case CCValAssign::Full: break; 1038 case CCValAssign::SExt: 1039 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1040 break; 1041 case CCValAssign::ZExt: 1042 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1043 break; 1044 case CCValAssign::AExt: 1045 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1046 break; 1047 } 1048 1049 if (VA.isRegLoc()) { 1050 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1051 } else { 1052 assert(VA.isMemLoc()); 1053 if (StackPtr.Val == 0) 1054 StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy()); 1055 1056 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1057 Arg)); 1058 } 1059 } 1060 1061 // If the first argument is an sret pointer, remember it. 1062 bool isSRet = NumOps && 1063 (cast<ConstantSDNode>(Op.getOperand(6))->getValue() & 1064 ISD::ParamFlags::StructReturn); 1065 1066 if (!MemOpChains.empty()) 1067 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1068 &MemOpChains[0], MemOpChains.size()); 1069 1070 // Build a sequence of copy-to-reg nodes chained together with token chain 1071 // and flag operands which copy the outgoing args into registers. 1072 SDOperand InFlag; 1073 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1074 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1075 InFlag); 1076 InFlag = Chain.getValue(1); 1077 } 1078 1079 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1080 // GOT pointer. 1081 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1082 Subtarget->isPICStyleGOT()) { 1083 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1084 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1085 InFlag); 1086 InFlag = Chain.getValue(1); 1087 } 1088 1089 // If the callee is a GlobalAddress node (quite common, every direct call is) 1090 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1091 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1092 // We should use extra load for direct calls to dllimported functions in 1093 // non-JIT mode. 1094 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1095 getTargetMachine(), true)) 1096 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1097 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1098 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1099 1100 // Returns a chain & a flag for retval copy to use. 1101 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1102 SmallVector<SDOperand, 8> Ops; 1103 Ops.push_back(Chain); 1104 Ops.push_back(Callee); 1105 1106 // Add argument registers to the end of the list so that they are known live 1107 // into the call. 1108 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1109 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1110 RegsToPass[i].second.getValueType())); 1111 1112 // Add an implicit use GOT pointer in EBX. 1113 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1114 Subtarget->isPICStyleGOT()) 1115 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1116 1117 if (InFlag.Val) 1118 Ops.push_back(InFlag); 1119 1120 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1121 InFlag = Chain.getValue(1); 1122 1123 // Create the CALLSEQ_END node. 1124 unsigned NumBytesForCalleeToPush = 0; 1125 1126 if (CC == CallingConv::X86_StdCall || 1127 (CC == CallingConv::Fast && PerformTailCallOpt)) { 1128 if (isVarArg) 1129 NumBytesForCalleeToPush = isSRet ? 4 : 0; 1130 else 1131 NumBytesForCalleeToPush = NumBytes; 1132 assert(!(isVarArg && CC==CallingConv::Fast) && 1133 "CallingConv::Fast does not support varargs."); 1134 } else { 1135 // If this is is a call to a struct-return function, the callee 1136 // pops the hidden struct pointer, so we have to push it back. 1137 // This is common for Darwin/X86, Linux & Mingw32 targets. 1138 NumBytesForCalleeToPush = isSRet ? 4 : 0; 1139 } 1140 1141 Chain = DAG.getCALLSEQ_END(Chain, 1142 DAG.getConstant(NumBytes, getPointerTy()), 1143 DAG.getConstant(NumBytesForCalleeToPush, 1144 getPointerTy()), 1145 InFlag); 1146 InFlag = Chain.getValue(1); 1147 1148 // Handle result values, copying them out of physregs into vregs that we 1149 // return. 1150 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 1151} 1152 1153 1154//===----------------------------------------------------------------------===// 1155// FastCall Calling Convention implementation 1156//===----------------------------------------------------------------------===// 1157// 1158// The X86 'fastcall' calling convention passes up to two integer arguments in 1159// registers (an appropriate portion of ECX/EDX), passes arguments in C order, 1160// and requires that the callee pop its arguments off the stack (allowing proper 1161// tail calls), and has the same return value conventions as C calling convs. 1162// 1163// This calling convention always arranges for the callee pop value to be 8n+4 1164// bytes, which is needed for tail recursion elimination and stack alignment 1165// reasons. 1166SDOperand 1167X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) { 1168 MachineFunction &MF = DAG.getMachineFunction(); 1169 MachineFrameInfo *MFI = MF.getFrameInfo(); 1170 SDOperand Root = Op.getOperand(0); 1171 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1172 1173 // Assign locations to all of the incoming arguments. 1174 SmallVector<CCValAssign, 16> ArgLocs; 1175 CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg, 1176 getTargetMachine(), ArgLocs); 1177 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall); 1178 1179 SmallVector<SDOperand, 8> ArgValues; 1180 unsigned LastVal = ~0U; 1181 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1182 CCValAssign &VA = ArgLocs[i]; 1183 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1184 // places. 1185 assert(VA.getValNo() != LastVal && 1186 "Don't support value assigned to multiple locs yet"); 1187 LastVal = VA.getValNo(); 1188 1189 if (VA.isRegLoc()) { 1190 MVT::ValueType RegVT = VA.getLocVT(); 1191 TargetRegisterClass *RC; 1192 if (RegVT == MVT::i32) 1193 RC = X86::GR32RegisterClass; 1194 else { 1195 assert(MVT::isVector(RegVT)); 1196 RC = X86::VR128RegisterClass; 1197 } 1198 1199 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); 1200 SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); 1201 1202 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1203 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1204 // right size. 1205 if (VA.getLocInfo() == CCValAssign::SExt) 1206 ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue, 1207 DAG.getValueType(VA.getValVT())); 1208 else if (VA.getLocInfo() == CCValAssign::ZExt) 1209 ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue, 1210 DAG.getValueType(VA.getValVT())); 1211 1212 if (VA.getLocInfo() != CCValAssign::Full) 1213 ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue); 1214 1215 ArgValues.push_back(ArgValue); 1216 } else { 1217 assert(VA.isMemLoc()); 1218 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i)); 1219 } 1220 } 1221 1222 ArgValues.push_back(Root); 1223 1224 unsigned StackSize = CCInfo.getNextStackOffset(); 1225 1226 if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) { 1227 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1228 // arguments and the arguments after the retaddr has been pushed are 1229 // aligned. 1230 if ((StackSize & 7) == 0) 1231 StackSize += 4; 1232 } 1233 1234 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. 1235 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only. 1236 BytesToPopOnReturn = StackSize; // Callee pops all stack arguments. 1237 BytesCallerReserves = 0; 1238 1239 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1240 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); 1241 1242 // Return the new list of results. 1243 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 1244 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo); 1245} 1246 1247SDOperand 1248X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, 1249 const SDOperand &StackPtr, 1250 const CCValAssign &VA, 1251 SDOperand Chain, 1252 SDOperand Arg) { 1253 SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy()); 1254 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1255 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1256 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1257 if (Flags & ISD::ParamFlags::ByVal) { 1258 unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >> 1259 ISD::ParamFlags::ByValAlignOffs); 1260 1261 unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >> 1262 ISD::ParamFlags::ByValSizeOffs; 1263 1264 SDOperand AlignNode = DAG.getConstant(Align, MVT::i32); 1265 SDOperand SizeNode = DAG.getConstant(Size, MVT::i32); 1266 SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32); 1267 1268 return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode, 1269 AlwaysInline); 1270 } else { 1271 return DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1272 } 1273} 1274 1275SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, 1276 unsigned CC) { 1277 SDOperand Chain = Op.getOperand(0); 1278 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1279 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1280 SDOperand Callee = Op.getOperand(4); 1281 1282 // Analyze operands of the call, assigning locations to each operand. 1283 SmallVector<CCValAssign, 16> ArgLocs; 1284 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1285 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall); 1286 1287 // Get a count of how many bytes are to be pushed on the stack. 1288 unsigned NumBytes = CCInfo.getNextStackOffset(); 1289 1290 if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) { 1291 // Make sure the instruction takes 8n+4 bytes to make sure the start of the 1292 // arguments and the arguments after the retaddr has been pushed are 1293 // aligned. 1294 if ((NumBytes & 7) == 0) 1295 NumBytes += 4; 1296 } 1297 1298 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1299 1300 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1301 SmallVector<SDOperand, 8> MemOpChains; 1302 1303 SDOperand StackPtr; 1304 1305 // Walk the register/memloc assignments, inserting copies/loads. 1306 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1307 CCValAssign &VA = ArgLocs[i]; 1308 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1309 1310 // Promote the value if needed. 1311 switch (VA.getLocInfo()) { 1312 default: assert(0 && "Unknown loc info!"); 1313 case CCValAssign::Full: break; 1314 case CCValAssign::SExt: 1315 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1316 break; 1317 case CCValAssign::ZExt: 1318 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1319 break; 1320 case CCValAssign::AExt: 1321 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1322 break; 1323 } 1324 1325 if (VA.isRegLoc()) { 1326 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1327 } else { 1328 assert(VA.isMemLoc()); 1329 if (StackPtr.Val == 0) 1330 StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy()); 1331 1332 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1333 Arg)); 1334 } 1335 } 1336 1337 if (!MemOpChains.empty()) 1338 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1339 &MemOpChains[0], MemOpChains.size()); 1340 1341 // Build a sequence of copy-to-reg nodes chained together with token chain 1342 // and flag operands which copy the outgoing args into registers. 1343 SDOperand InFlag; 1344 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1345 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1346 InFlag); 1347 InFlag = Chain.getValue(1); 1348 } 1349 1350 // If the callee is a GlobalAddress node (quite common, every direct call is) 1351 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1352 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1353 // We should use extra load for direct calls to dllimported functions in 1354 // non-JIT mode. 1355 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1356 getTargetMachine(), true)) 1357 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1358 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1359 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1360 1361 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1362 // GOT pointer. 1363 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1364 Subtarget->isPICStyleGOT()) { 1365 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1366 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1367 InFlag); 1368 InFlag = Chain.getValue(1); 1369 } 1370 1371 // Returns a chain & a flag for retval copy to use. 1372 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1373 SmallVector<SDOperand, 8> Ops; 1374 Ops.push_back(Chain); 1375 Ops.push_back(Callee); 1376 1377 // Add argument registers to the end of the list so that they are known live 1378 // into the call. 1379 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1380 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1381 RegsToPass[i].second.getValueType())); 1382 1383 // Add an implicit use GOT pointer in EBX. 1384 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1385 Subtarget->isPICStyleGOT()) 1386 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1387 1388 if (InFlag.Val) 1389 Ops.push_back(InFlag); 1390 1391 assert(isTailCall==false && "no tail call here"); 1392 Chain = DAG.getNode(X86ISD::CALL, 1393 NodeTys, &Ops[0], Ops.size()); 1394 InFlag = Chain.getValue(1); 1395 1396 // Returns a flag for retval copy to use. 1397 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1398 Ops.clear(); 1399 Ops.push_back(Chain); 1400 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1401 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1402 Ops.push_back(InFlag); 1403 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1404 InFlag = Chain.getValue(1); 1405 1406 // Handle result values, copying them out of physregs into vregs that we 1407 // return. 1408 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 1409} 1410 1411//===----------------------------------------------------------------------===// 1412// Fast Calling Convention (tail call) implementation 1413//===----------------------------------------------------------------------===// 1414 1415// Like std call, callee cleans arguments, convention except that ECX is 1416// reserved for storing the tail called function address. Only 2 registers are 1417// free for argument passing (inreg). Tail call optimization is performed 1418// provided: 1419// * tailcallopt is enabled 1420// * caller/callee are fastcc 1421// * elf/pic is disabled OR 1422// * elf/pic enabled + callee is in module + callee has 1423// visibility protected or hidden 1424// To keep the stack aligned according to platform abi the function 1425// GetAlignedArgumentStackSize ensures that argument delta is always multiples 1426// of stack alignment. (Dynamic linkers need this - darwin's dyld for example) 1427// If a tail called function callee has more arguments than the caller the 1428// caller needs to make sure that there is room to move the RETADDR to. This is 1429// achieved by reserving an area the size of the argument delta right after the 1430// original REtADDR, but before the saved framepointer or the spilled registers 1431// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4) 1432// stack layout: 1433// arg1 1434// arg2 1435// RETADDR 1436// [ new RETADDR 1437// move area ] 1438// (possible EBP) 1439// ESI 1440// EDI 1441// local1 .. 1442 1443/// GetAlignedArgumentStackSize - Make the stack size align e.g 16n + 12 aligned 1444/// for a 16 byte align requirement. 1445unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 1446 SelectionDAG& DAG) { 1447 if (PerformTailCallOpt) { 1448 MachineFunction &MF = DAG.getMachineFunction(); 1449 const TargetMachine &TM = MF.getTarget(); 1450 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 1451 unsigned StackAlignment = TFI.getStackAlignment(); 1452 uint64_t AlignMask = StackAlignment - 1; 1453 int64_t Offset = StackSize; 1454 unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4; 1455 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 1456 // Number smaller than 12 so just add the difference. 1457 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 1458 } else { 1459 // Mask out lower bits, add stackalignment once plus the 12 bytes. 1460 Offset = ((~AlignMask) & Offset) + StackAlignment + 1461 (StackAlignment-SlotSize); 1462 } 1463 StackSize = Offset; 1464 } 1465 return StackSize; 1466} 1467 1468/// IsEligibleForTailCallElimination - Check to see whether the next instruction 1469/// following the call is a return. A function is eligible if caller/callee 1470/// calling conventions match, currently only fastcc supports tail calls, and 1471/// the function CALL is immediatly followed by a RET. 1472bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, 1473 SDOperand Ret, 1474 SelectionDAG& DAG) const { 1475 if (!PerformTailCallOpt) 1476 return false; 1477 1478 // Check whether CALL node immediatly preceeds the RET node and whether the 1479 // return uses the result of the node or is a void return. 1480 unsigned NumOps = Ret.getNumOperands(); 1481 if ((NumOps == 1 && 1482 (Ret.getOperand(0) == SDOperand(Call.Val,1) || 1483 Ret.getOperand(0) == SDOperand(Call.Val,0))) || 1484 (NumOps > 1 && 1485 Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) && 1486 Ret.getOperand(1) == SDOperand(Call.Val,0))) { 1487 MachineFunction &MF = DAG.getMachineFunction(); 1488 unsigned CallerCC = MF.getFunction()->getCallingConv(); 1489 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue(); 1490 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 1491 SDOperand Callee = Call.getOperand(4); 1492 // On elf/pic %ebx needs to be livein. 1493 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ || 1494 !Subtarget->isPICStyleGOT()) 1495 return true; 1496 1497 // Can only do local tail calls with PIC. 1498 GlobalValue * GV = 0; 1499 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 1500 if(G != 0 && 1501 (GV = G->getGlobal()) && 1502 (GV->hasHiddenVisibility() || GV->hasProtectedVisibility())) 1503 return true; 1504 } 1505 } 1506 1507 return false; 1508} 1509 1510SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op, 1511 SelectionDAG &DAG, 1512 unsigned CC) { 1513 SDOperand Chain = Op.getOperand(0); 1514 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1515 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 1516 SDOperand Callee = Op.getOperand(4); 1517 bool is64Bit = Subtarget->is64Bit(); 1518 1519 assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls."); 1520 1521 // Analyze operands of the call, assigning locations to each operand. 1522 SmallVector<CCValAssign, 16> ArgLocs; 1523 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1524 if (is64Bit) 1525 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall); 1526 else 1527 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall); 1528 1529 1530 // Lower arguments at fp - stackoffset + fpdiff. 1531 MachineFunction &MF = DAG.getMachineFunction(); 1532 1533 unsigned NumBytesToBePushed = 1534 GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG); 1535 1536 unsigned NumBytesCallerPushed = 1537 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 1538 int FPDiff = NumBytesCallerPushed - NumBytesToBePushed; 1539 1540 // Set the delta of movement of the returnaddr stackslot. 1541 // But only set if delta is greater than previous delta. 1542 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 1543 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 1544 1545 Chain = DAG. 1546 getCALLSEQ_START(Chain, DAG.getConstant(NumBytesToBePushed, getPointerTy())); 1547 1548 // Adjust the Return address stack slot. 1549 SDOperand RetAddrFrIdx, NewRetAddrFrIdx; 1550 if (FPDiff) { 1551 MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32; 1552 RetAddrFrIdx = getReturnAddressFrameIndex(DAG); 1553 // Load the "old" Return address. 1554 RetAddrFrIdx = 1555 DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0); 1556 // Calculate the new stack slot for the return address. 1557 int SlotSize = is64Bit ? 8 : 4; 1558 int NewReturnAddrFI = 1559 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); 1560 NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1561 Chain = SDOperand(RetAddrFrIdx.Val, 1); 1562 } 1563 1564 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1565 SmallVector<SDOperand, 8> MemOpChains; 1566 SmallVector<SDOperand, 8> MemOpChains2; 1567 SDOperand FramePtr, StackPtr; 1568 SDOperand PtrOff; 1569 SDOperand FIN; 1570 int FI = 0; 1571 1572 // Walk the register/memloc assignments, inserting copies/loads. Lower 1573 // arguments first to the stack slot where they would normally - in case of a 1574 // normal function call - be. 1575 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1576 CCValAssign &VA = ArgLocs[i]; 1577 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1578 1579 // Promote the value if needed. 1580 switch (VA.getLocInfo()) { 1581 default: assert(0 && "Unknown loc info!"); 1582 case CCValAssign::Full: break; 1583 case CCValAssign::SExt: 1584 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1585 break; 1586 case CCValAssign::ZExt: 1587 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1588 break; 1589 case CCValAssign::AExt: 1590 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1591 break; 1592 } 1593 1594 if (VA.isRegLoc()) { 1595 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1596 } else { 1597 assert(VA.isMemLoc()); 1598 if (StackPtr.Val == 0) 1599 StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy()); 1600 1601 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1602 Arg)); 1603 } 1604 } 1605 1606 if (!MemOpChains.empty()) 1607 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1608 &MemOpChains[0], MemOpChains.size()); 1609 1610 // Build a sequence of copy-to-reg nodes chained together with token chain 1611 // and flag operands which copy the outgoing args into registers. 1612 SDOperand InFlag; 1613 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1614 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1615 InFlag); 1616 InFlag = Chain.getValue(1); 1617 } 1618 InFlag = SDOperand(); 1619 1620 // Copy from stack slots to stack slot of a tail called function. This needs 1621 // to be done because if we would lower the arguments directly to their real 1622 // stack slot we might end up overwriting each other. 1623 // TODO: To make this more efficient (sometimes saving a store/load) we could 1624 // analyse the arguments and emit this store/load/store sequence only for 1625 // arguments which would be overwritten otherwise. 1626 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1627 CCValAssign &VA = ArgLocs[i]; 1628 if (!VA.isRegLoc()) { 1629 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1630 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1631 1632 // Get source stack slot. 1633 SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy()); 1634 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); 1635 // Create frame index. 1636 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1637 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1638 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1639 FIN = DAG.getFrameIndex(FI, MVT::i32); 1640 if (Flags & ISD::ParamFlags::ByVal) { 1641 // Copy relative to framepointer. 1642 unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >> 1643 ISD::ParamFlags::ByValAlignOffs); 1644 1645 unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >> 1646 ISD::ParamFlags::ByValSizeOffs; 1647 1648 SDOperand AlignNode = DAG.getConstant(Align, MVT::i32); 1649 SDOperand SizeNode = DAG.getConstant(Size, MVT::i32); 1650 SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1); 1651 1652 MemOpChains2.push_back(DAG.getMemcpy(Chain, FIN, PtrOff, SizeNode, 1653 AlignNode,AlwaysInline)); 1654 } else { 1655 SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL,0); 1656 // Store relative to framepointer. 1657 MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0)); 1658 } 1659 } 1660 } 1661 1662 if (!MemOpChains2.empty()) 1663 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1664 &MemOpChains2[0], MemOpChains.size()); 1665 1666 // Store the return address to the appropriate stack slot. 1667 if (FPDiff) 1668 Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0); 1669 1670 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1671 // GOT pointer. 1672 // Does not work with tail call since ebx is not restored correctly by 1673 // tailcaller. TODO: at least for x86 - verify for x86-64 1674 1675 // If the callee is a GlobalAddress node (quite common, every direct call is) 1676 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1677 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1678 // We should use extra load for direct calls to dllimported functions in 1679 // non-JIT mode. 1680 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1681 getTargetMachine(), true)) 1682 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1683 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1684 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1685 else { 1686 assert(Callee.getOpcode() == ISD::LOAD && 1687 "Function destination must be loaded into virtual register"); 1688 unsigned Opc = is64Bit ? X86::R9 : X86::ECX; 1689 1690 Chain = DAG.getCopyToReg(Chain, 1691 DAG.getRegister(Opc, getPointerTy()) , 1692 Callee,InFlag); 1693 Callee = DAG.getRegister(Opc, getPointerTy()); 1694 // Add register as live out. 1695 DAG.getMachineFunction().addLiveOut(Opc); 1696 } 1697 1698 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1699 SmallVector<SDOperand, 8> Ops; 1700 1701 Ops.push_back(Chain); 1702 Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy())); 1703 Ops.push_back(DAG.getConstant(0, getPointerTy())); 1704 if (InFlag.Val) 1705 Ops.push_back(InFlag); 1706 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1707 InFlag = Chain.getValue(1); 1708 1709 // Returns a chain & a flag for retval copy to use. 1710 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1711 Ops.clear(); 1712 Ops.push_back(Chain); 1713 Ops.push_back(Callee); 1714 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1715 // Add argument registers to the end of the list so that they are known live 1716 // into the call. 1717 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1718 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1719 RegsToPass[i].second.getValueType())); 1720 if (InFlag.Val) 1721 Ops.push_back(InFlag); 1722 assert(InFlag.Val && 1723 "Flag must be set. Depend on flag being set in LowerRET"); 1724 Chain = DAG.getNode(X86ISD::TAILCALL, 1725 Op.Val->getVTList(), &Ops[0], Ops.size()); 1726 1727 return SDOperand(Chain.Val, Op.ResNo); 1728} 1729 1730//===----------------------------------------------------------------------===// 1731// X86-64 C Calling Convention implementation 1732//===----------------------------------------------------------------------===// 1733 1734SDOperand 1735X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) { 1736 MachineFunction &MF = DAG.getMachineFunction(); 1737 MachineFrameInfo *MFI = MF.getFrameInfo(); 1738 SDOperand Root = Op.getOperand(0); 1739 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1740 unsigned CC= MF.getFunction()->getCallingConv(); 1741 1742 static const unsigned GPR64ArgRegs[] = { 1743 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1744 }; 1745 static const unsigned XMMArgRegs[] = { 1746 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1747 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1748 }; 1749 1750 1751 // Assign locations to all of the incoming arguments. 1752 SmallVector<CCValAssign, 16> ArgLocs; 1753 CCState CCInfo(CC, isVarArg, 1754 getTargetMachine(), ArgLocs); 1755 if (CC == CallingConv::Fast && PerformTailCallOpt) 1756 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall); 1757 else 1758 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C); 1759 1760 SmallVector<SDOperand, 8> ArgValues; 1761 unsigned LastVal = ~0U; 1762 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1763 CCValAssign &VA = ArgLocs[i]; 1764 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1765 // places. 1766 assert(VA.getValNo() != LastVal && 1767 "Don't support value assigned to multiple locs yet"); 1768 LastVal = VA.getValNo(); 1769 1770 if (VA.isRegLoc()) { 1771 MVT::ValueType RegVT = VA.getLocVT(); 1772 TargetRegisterClass *RC; 1773 if (RegVT == MVT::i32) 1774 RC = X86::GR32RegisterClass; 1775 else if (RegVT == MVT::i64) 1776 RC = X86::GR64RegisterClass; 1777 else if (RegVT == MVT::f32) 1778 RC = X86::FR32RegisterClass; 1779 else if (RegVT == MVT::f64) 1780 RC = X86::FR64RegisterClass; 1781 else { 1782 assert(MVT::isVector(RegVT)); 1783 if (MVT::getSizeInBits(RegVT) == 64) { 1784 RC = X86::GR64RegisterClass; // MMX values are passed in GPRs. 1785 RegVT = MVT::i64; 1786 } else 1787 RC = X86::VR128RegisterClass; 1788 } 1789 1790 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); 1791 SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); 1792 1793 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1794 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1795 // right size. 1796 if (VA.getLocInfo() == CCValAssign::SExt) 1797 ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue, 1798 DAG.getValueType(VA.getValVT())); 1799 else if (VA.getLocInfo() == CCValAssign::ZExt) 1800 ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue, 1801 DAG.getValueType(VA.getValVT())); 1802 1803 if (VA.getLocInfo() != CCValAssign::Full) 1804 ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue); 1805 1806 // Handle MMX values passed in GPRs. 1807 if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass && 1808 MVT::getSizeInBits(RegVT) == 64) 1809 ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue); 1810 1811 ArgValues.push_back(ArgValue); 1812 } else { 1813 assert(VA.isMemLoc()); 1814 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i)); 1815 } 1816 } 1817 1818 unsigned StackSize = CCInfo.getNextStackOffset(); 1819 if (CC==CallingConv::Fast) 1820 StackSize =GetAlignedArgumentStackSize(StackSize, DAG); 1821 1822 // If the function takes variable number of arguments, make a frame index for 1823 // the start of the first vararg value... for expansion of llvm.va_start. 1824 if (isVarArg) { 1825 assert(CC!=CallingConv::Fast 1826 && "Var arg not supported with calling convention fastcc"); 1827 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6); 1828 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1829 1830 // For X86-64, if there are vararg parameters that are passed via 1831 // registers, then we must store them to their spots on the stack so they 1832 // may be loaded by deferencing the result of va_next. 1833 VarArgsGPOffset = NumIntRegs * 8; 1834 VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16; 1835 VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize); 1836 RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16); 1837 1838 // Store the integer parameter registers. 1839 SmallVector<SDOperand, 8> MemOps; 1840 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 1841 SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1842 DAG.getConstant(VarArgsGPOffset, getPointerTy())); 1843 for (; NumIntRegs != 6; ++NumIntRegs) { 1844 unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], 1845 X86::GR64RegisterClass); 1846 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1847 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1848 MemOps.push_back(Store); 1849 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1850 DAG.getConstant(8, getPointerTy())); 1851 } 1852 1853 // Now store the XMM (fp + vector) parameter registers. 1854 FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1855 DAG.getConstant(VarArgsFPOffset, getPointerTy())); 1856 for (; NumXMMRegs != 8; ++NumXMMRegs) { 1857 unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], 1858 X86::VR128RegisterClass); 1859 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); 1860 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1861 MemOps.push_back(Store); 1862 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1863 DAG.getConstant(16, getPointerTy())); 1864 } 1865 if (!MemOps.empty()) 1866 Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 1867 &MemOps[0], MemOps.size()); 1868 } 1869 1870 ArgValues.push_back(Root); 1871 // Tail call convention (fastcc) needs callee pop. 1872 if (CC == CallingConv::Fast && PerformTailCallOpt) { 1873 BytesToPopOnReturn = StackSize; // Callee pops everything. 1874 BytesCallerReserves = 0; 1875 } else { 1876 BytesToPopOnReturn = 0; // Callee pops nothing. 1877 BytesCallerReserves = StackSize; 1878 } 1879 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1880 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); 1881 1882 // Return the new list of results. 1883 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 1884 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo); 1885} 1886 1887SDOperand 1888X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG, 1889 unsigned CC) { 1890 SDOperand Chain = Op.getOperand(0); 1891 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1892 SDOperand Callee = Op.getOperand(4); 1893 1894 // Analyze operands of the call, assigning locations to each operand. 1895 SmallVector<CCValAssign, 16> ArgLocs; 1896 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); 1897 if (CC==CallingConv::Fast && PerformTailCallOpt) 1898 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall); 1899 else 1900 CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C); 1901 1902 // Get a count of how many bytes are to be pushed on the stack. 1903 unsigned NumBytes = CCInfo.getNextStackOffset(); 1904 if (CC == CallingConv::Fast) 1905 NumBytes = GetAlignedArgumentStackSize(NumBytes,DAG); 1906 1907 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); 1908 1909 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; 1910 SmallVector<SDOperand, 8> MemOpChains; 1911 1912 SDOperand StackPtr; 1913 1914 // Walk the register/memloc assignments, inserting copies/loads. 1915 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1916 CCValAssign &VA = ArgLocs[i]; 1917 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1918 1919 // Promote the value if needed. 1920 switch (VA.getLocInfo()) { 1921 default: assert(0 && "Unknown loc info!"); 1922 case CCValAssign::Full: break; 1923 case CCValAssign::SExt: 1924 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1925 break; 1926 case CCValAssign::ZExt: 1927 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1928 break; 1929 case CCValAssign::AExt: 1930 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1931 break; 1932 } 1933 1934 if (VA.isRegLoc()) { 1935 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1936 } else { 1937 assert(VA.isMemLoc()); 1938 if (StackPtr.Val == 0) 1939 StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy()); 1940 1941 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1942 Arg)); 1943 } 1944 } 1945 1946 if (!MemOpChains.empty()) 1947 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1948 &MemOpChains[0], MemOpChains.size()); 1949 1950 // Build a sequence of copy-to-reg nodes chained together with token chain 1951 // and flag operands which copy the outgoing args into registers. 1952 SDOperand InFlag; 1953 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1954 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1955 InFlag); 1956 InFlag = Chain.getValue(1); 1957 } 1958 1959 if (isVarArg) { 1960 assert ( CallingConv::Fast != CC && 1961 "Var args not supported with calling convention fastcc"); 1962 1963 // From AMD64 ABI document: 1964 // For calls that may call functions that use varargs or stdargs 1965 // (prototype-less calls or calls to functions containing ellipsis (...) in 1966 // the declaration) %al is used as hidden argument to specify the number 1967 // of SSE registers used. The contents of %al do not need to match exactly 1968 // the number of registers, but must be an ubound on the number of SSE 1969 // registers used and is in the range 0 - 8 inclusive. 1970 1971 // Count the number of XMM registers allocated. 1972 static const unsigned XMMArgRegs[] = { 1973 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1974 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1975 }; 1976 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1977 1978 Chain = DAG.getCopyToReg(Chain, X86::AL, 1979 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1980 InFlag = Chain.getValue(1); 1981 } 1982 1983 // If the callee is a GlobalAddress node (quite common, every direct call is) 1984 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1985 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1986 // We should use extra load for direct calls to dllimported functions in 1987 // non-JIT mode. 1988 if (getTargetMachine().getCodeModel() != CodeModel::Large 1989 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1990 getTargetMachine(), true)) 1991 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1992 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1993 if (getTargetMachine().getCodeModel() != CodeModel::Large) 1994 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1995 1996 // Returns a chain & a flag for retval copy to use. 1997 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1998 SmallVector<SDOperand, 8> Ops; 1999 Ops.push_back(Chain); 2000 Ops.push_back(Callee); 2001 2002 // Add argument registers to the end of the list so that they are known live 2003 // into the call. 2004 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2005 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2006 RegsToPass[i].second.getValueType())); 2007 2008 if (InFlag.Val) 2009 Ops.push_back(InFlag); 2010 2011 Chain = DAG.getNode(X86ISD::CALL, 2012 NodeTys, &Ops[0], Ops.size()); 2013 InFlag = Chain.getValue(1); 2014 int NumBytesForCalleeToPush = 0; 2015 if (CC==CallingConv::Fast && PerformTailCallOpt) { 2016 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2017 } else { 2018 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2019 } 2020 // Returns a flag for retval copy to use. 2021 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 2022 Ops.clear(); 2023 Ops.push_back(Chain); 2024 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 2025 Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy())); 2026 Ops.push_back(InFlag); 2027 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 2028 InFlag = Chain.getValue(1); 2029 2030 // Handle result values, copying them out of physregs into vregs that we 2031 // return. 2032 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 2033} 2034 2035 2036//===----------------------------------------------------------------------===// 2037// Other Lowering Hooks 2038//===----------------------------------------------------------------------===// 2039 2040 2041SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 2042 MachineFunction &MF = DAG.getMachineFunction(); 2043 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2044 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2045 2046 if (ReturnAddrIndex == 0) { 2047 // Set up a frame object for the return address. 2048 if (Subtarget->is64Bit()) 2049 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8); 2050 else 2051 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4); 2052 2053 FuncInfo->setRAIndex(ReturnAddrIndex); 2054 } 2055 2056 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 2057} 2058 2059 2060 2061/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86 2062/// specific condition code. It returns a false if it cannot do a direct 2063/// translation. X86CC is the translated CondCode. LHS/RHS are modified as 2064/// needed. 2065static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 2066 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, 2067 SelectionDAG &DAG) { 2068 X86CC = X86::COND_INVALID; 2069 if (!isFP) { 2070 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 2071 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 2072 // X > -1 -> X == 0, jump !sign. 2073 RHS = DAG.getConstant(0, RHS.getValueType()); 2074 X86CC = X86::COND_NS; 2075 return true; 2076 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 2077 // X < 0 -> X == 0, jump on sign. 2078 X86CC = X86::COND_S; 2079 return true; 2080 } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) { 2081 // X < 1 -> X <= 0 2082 RHS = DAG.getConstant(0, RHS.getValueType()); 2083 X86CC = X86::COND_LE; 2084 return true; 2085 } 2086 } 2087 2088 switch (SetCCOpcode) { 2089 default: break; 2090 case ISD::SETEQ: X86CC = X86::COND_E; break; 2091 case ISD::SETGT: X86CC = X86::COND_G; break; 2092 case ISD::SETGE: X86CC = X86::COND_GE; break; 2093 case ISD::SETLT: X86CC = X86::COND_L; break; 2094 case ISD::SETLE: X86CC = X86::COND_LE; break; 2095 case ISD::SETNE: X86CC = X86::COND_NE; break; 2096 case ISD::SETULT: X86CC = X86::COND_B; break; 2097 case ISD::SETUGT: X86CC = X86::COND_A; break; 2098 case ISD::SETULE: X86CC = X86::COND_BE; break; 2099 case ISD::SETUGE: X86CC = X86::COND_AE; break; 2100 } 2101 } else { 2102 // On a floating point condition, the flags are set as follows: 2103 // ZF PF CF op 2104 // 0 | 0 | 0 | X > Y 2105 // 0 | 0 | 1 | X < Y 2106 // 1 | 0 | 0 | X == Y 2107 // 1 | 1 | 1 | unordered 2108 bool Flip = false; 2109 switch (SetCCOpcode) { 2110 default: break; 2111 case ISD::SETUEQ: 2112 case ISD::SETEQ: X86CC = X86::COND_E; break; 2113 case ISD::SETOLT: Flip = true; // Fallthrough 2114 case ISD::SETOGT: 2115 case ISD::SETGT: X86CC = X86::COND_A; break; 2116 case ISD::SETOLE: Flip = true; // Fallthrough 2117 case ISD::SETOGE: 2118 case ISD::SETGE: X86CC = X86::COND_AE; break; 2119 case ISD::SETUGT: Flip = true; // Fallthrough 2120 case ISD::SETULT: 2121 case ISD::SETLT: X86CC = X86::COND_B; break; 2122 case ISD::SETUGE: Flip = true; // Fallthrough 2123 case ISD::SETULE: 2124 case ISD::SETLE: X86CC = X86::COND_BE; break; 2125 case ISD::SETONE: 2126 case ISD::SETNE: X86CC = X86::COND_NE; break; 2127 case ISD::SETUO: X86CC = X86::COND_P; break; 2128 case ISD::SETO: X86CC = X86::COND_NP; break; 2129 } 2130 if (Flip) 2131 std::swap(LHS, RHS); 2132 } 2133 2134 return X86CC != X86::COND_INVALID; 2135} 2136 2137/// hasFPCMov - is there a floating point cmov for the specific X86 condition 2138/// code. Current x86 isa includes the following FP cmov instructions: 2139/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 2140static bool hasFPCMov(unsigned X86CC) { 2141 switch (X86CC) { 2142 default: 2143 return false; 2144 case X86::COND_B: 2145 case X86::COND_BE: 2146 case X86::COND_E: 2147 case X86::COND_P: 2148 case X86::COND_A: 2149 case X86::COND_AE: 2150 case X86::COND_NE: 2151 case X86::COND_NP: 2152 return true; 2153 } 2154} 2155 2156/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return 2157/// true if Op is undef or if its value falls within the specified range (L, H]. 2158static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { 2159 if (Op.getOpcode() == ISD::UNDEF) 2160 return true; 2161 2162 unsigned Val = cast<ConstantSDNode>(Op)->getValue(); 2163 return (Val >= Low && Val < Hi); 2164} 2165 2166/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return 2167/// true if Op is undef or if its value equal to the specified value. 2168static bool isUndefOrEqual(SDOperand Op, unsigned Val) { 2169 if (Op.getOpcode() == ISD::UNDEF) 2170 return true; 2171 return cast<ConstantSDNode>(Op)->getValue() == Val; 2172} 2173 2174/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 2175/// specifies a shuffle of elements that is suitable for input to PSHUFD. 2176bool X86::isPSHUFDMask(SDNode *N) { 2177 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2178 2179 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 2180 return false; 2181 2182 // Check if the value doesn't reference the second vector. 2183 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2184 SDOperand Arg = N->getOperand(i); 2185 if (Arg.getOpcode() == ISD::UNDEF) continue; 2186 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2187 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 2188 return false; 2189 } 2190 2191 return true; 2192} 2193 2194/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 2195/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 2196bool X86::isPSHUFHWMask(SDNode *N) { 2197 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2198 2199 if (N->getNumOperands() != 8) 2200 return false; 2201 2202 // Lower quadword copied in order. 2203 for (unsigned i = 0; i != 4; ++i) { 2204 SDOperand Arg = N->getOperand(i); 2205 if (Arg.getOpcode() == ISD::UNDEF) continue; 2206 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2207 if (cast<ConstantSDNode>(Arg)->getValue() != i) 2208 return false; 2209 } 2210 2211 // Upper quadword shuffled. 2212 for (unsigned i = 4; i != 8; ++i) { 2213 SDOperand Arg = N->getOperand(i); 2214 if (Arg.getOpcode() == ISD::UNDEF) continue; 2215 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2216 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2217 if (Val < 4 || Val > 7) 2218 return false; 2219 } 2220 2221 return true; 2222} 2223 2224/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2225/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2226bool X86::isPSHUFLWMask(SDNode *N) { 2227 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2228 2229 if (N->getNumOperands() != 8) 2230 return false; 2231 2232 // Upper quadword copied in order. 2233 for (unsigned i = 4; i != 8; ++i) 2234 if (!isUndefOrEqual(N->getOperand(i), i)) 2235 return false; 2236 2237 // Lower quadword shuffled. 2238 for (unsigned i = 0; i != 4; ++i) 2239 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2240 return false; 2241 2242 return true; 2243} 2244 2245/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2246/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2247static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2248 if (NumElems != 2 && NumElems != 4) return false; 2249 2250 unsigned Half = NumElems / 2; 2251 for (unsigned i = 0; i < Half; ++i) 2252 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2253 return false; 2254 for (unsigned i = Half; i < NumElems; ++i) 2255 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2256 return false; 2257 2258 return true; 2259} 2260 2261bool X86::isSHUFPMask(SDNode *N) { 2262 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2263 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2264} 2265 2266/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2267/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2268/// half elements to come from vector 1 (which would equal the dest.) and 2269/// the upper half to come from vector 2. 2270static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2271 if (NumOps != 2 && NumOps != 4) return false; 2272 2273 unsigned Half = NumOps / 2; 2274 for (unsigned i = 0; i < Half; ++i) 2275 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2276 return false; 2277 for (unsigned i = Half; i < NumOps; ++i) 2278 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2279 return false; 2280 return true; 2281} 2282 2283static bool isCommutedSHUFP(SDNode *N) { 2284 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2285 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2286} 2287 2288/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2289/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2290bool X86::isMOVHLPSMask(SDNode *N) { 2291 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2292 2293 if (N->getNumOperands() != 4) 2294 return false; 2295 2296 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2297 return isUndefOrEqual(N->getOperand(0), 6) && 2298 isUndefOrEqual(N->getOperand(1), 7) && 2299 isUndefOrEqual(N->getOperand(2), 2) && 2300 isUndefOrEqual(N->getOperand(3), 3); 2301} 2302 2303/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2304/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2305/// <2, 3, 2, 3> 2306bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2307 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2308 2309 if (N->getNumOperands() != 4) 2310 return false; 2311 2312 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2313 return isUndefOrEqual(N->getOperand(0), 2) && 2314 isUndefOrEqual(N->getOperand(1), 3) && 2315 isUndefOrEqual(N->getOperand(2), 2) && 2316 isUndefOrEqual(N->getOperand(3), 3); 2317} 2318 2319/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2320/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2321bool X86::isMOVLPMask(SDNode *N) { 2322 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2323 2324 unsigned NumElems = N->getNumOperands(); 2325 if (NumElems != 2 && NumElems != 4) 2326 return false; 2327 2328 for (unsigned i = 0; i < NumElems/2; ++i) 2329 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2330 return false; 2331 2332 for (unsigned i = NumElems/2; i < NumElems; ++i) 2333 if (!isUndefOrEqual(N->getOperand(i), i)) 2334 return false; 2335 2336 return true; 2337} 2338 2339/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2340/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2341/// and MOVLHPS. 2342bool X86::isMOVHPMask(SDNode *N) { 2343 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2344 2345 unsigned NumElems = N->getNumOperands(); 2346 if (NumElems != 2 && NumElems != 4) 2347 return false; 2348 2349 for (unsigned i = 0; i < NumElems/2; ++i) 2350 if (!isUndefOrEqual(N->getOperand(i), i)) 2351 return false; 2352 2353 for (unsigned i = 0; i < NumElems/2; ++i) { 2354 SDOperand Arg = N->getOperand(i + NumElems/2); 2355 if (!isUndefOrEqual(Arg, i + NumElems)) 2356 return false; 2357 } 2358 2359 return true; 2360} 2361 2362/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2363/// specifies a shuffle of elements that is suitable for input to UNPCKL. 2364bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2365 bool V2IsSplat = false) { 2366 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2367 return false; 2368 2369 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2370 SDOperand BitI = Elts[i]; 2371 SDOperand BitI1 = Elts[i+1]; 2372 if (!isUndefOrEqual(BitI, j)) 2373 return false; 2374 if (V2IsSplat) { 2375 if (isUndefOrEqual(BitI1, NumElts)) 2376 return false; 2377 } else { 2378 if (!isUndefOrEqual(BitI1, j + NumElts)) 2379 return false; 2380 } 2381 } 2382 2383 return true; 2384} 2385 2386bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2387 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2388 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2389} 2390 2391/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2392/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2393bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2394 bool V2IsSplat = false) { 2395 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2396 return false; 2397 2398 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2399 SDOperand BitI = Elts[i]; 2400 SDOperand BitI1 = Elts[i+1]; 2401 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2402 return false; 2403 if (V2IsSplat) { 2404 if (isUndefOrEqual(BitI1, NumElts)) 2405 return false; 2406 } else { 2407 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2408 return false; 2409 } 2410 } 2411 2412 return true; 2413} 2414 2415bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2416 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2417 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2418} 2419 2420/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2421/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2422/// <0, 0, 1, 1> 2423bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2424 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2425 2426 unsigned NumElems = N->getNumOperands(); 2427 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2428 return false; 2429 2430 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2431 SDOperand BitI = N->getOperand(i); 2432 SDOperand BitI1 = N->getOperand(i+1); 2433 2434 if (!isUndefOrEqual(BitI, j)) 2435 return false; 2436 if (!isUndefOrEqual(BitI1, j)) 2437 return false; 2438 } 2439 2440 return true; 2441} 2442 2443/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2444/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2445/// <2, 2, 3, 3> 2446bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2447 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2448 2449 unsigned NumElems = N->getNumOperands(); 2450 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2451 return false; 2452 2453 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2454 SDOperand BitI = N->getOperand(i); 2455 SDOperand BitI1 = N->getOperand(i + 1); 2456 2457 if (!isUndefOrEqual(BitI, j)) 2458 return false; 2459 if (!isUndefOrEqual(BitI1, j)) 2460 return false; 2461 } 2462 2463 return true; 2464} 2465 2466/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2467/// specifies a shuffle of elements that is suitable for input to MOVSS, 2468/// MOVSD, and MOVD, i.e. setting the lowest element. 2469static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) { 2470 if (NumElts != 2 && NumElts != 4) 2471 return false; 2472 2473 if (!isUndefOrEqual(Elts[0], NumElts)) 2474 return false; 2475 2476 for (unsigned i = 1; i < NumElts; ++i) { 2477 if (!isUndefOrEqual(Elts[i], i)) 2478 return false; 2479 } 2480 2481 return true; 2482} 2483 2484bool X86::isMOVLMask(SDNode *N) { 2485 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2486 return ::isMOVLMask(N->op_begin(), N->getNumOperands()); 2487} 2488 2489/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 2490/// of what x86 movss want. X86 movs requires the lowest element to be lowest 2491/// element of vector 2 and the other elements to come from vector 1 in order. 2492static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps, 2493 bool V2IsSplat = false, 2494 bool V2IsUndef = false) { 2495 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 2496 return false; 2497 2498 if (!isUndefOrEqual(Ops[0], 0)) 2499 return false; 2500 2501 for (unsigned i = 1; i < NumOps; ++i) { 2502 SDOperand Arg = Ops[i]; 2503 if (!(isUndefOrEqual(Arg, i+NumOps) || 2504 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || 2505 (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) 2506 return false; 2507 } 2508 2509 return true; 2510} 2511 2512static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 2513 bool V2IsUndef = false) { 2514 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2515 return isCommutedMOVL(N->op_begin(), N->getNumOperands(), 2516 V2IsSplat, V2IsUndef); 2517} 2518 2519/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2520/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 2521bool X86::isMOVSHDUPMask(SDNode *N) { 2522 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2523 2524 if (N->getNumOperands() != 4) 2525 return false; 2526 2527 // Expect 1, 1, 3, 3 2528 for (unsigned i = 0; i < 2; ++i) { 2529 SDOperand Arg = N->getOperand(i); 2530 if (Arg.getOpcode() == ISD::UNDEF) continue; 2531 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2532 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2533 if (Val != 1) return false; 2534 } 2535 2536 bool HasHi = false; 2537 for (unsigned i = 2; i < 4; ++i) { 2538 SDOperand Arg = N->getOperand(i); 2539 if (Arg.getOpcode() == ISD::UNDEF) continue; 2540 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2541 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2542 if (Val != 3) return false; 2543 HasHi = true; 2544 } 2545 2546 // Don't use movshdup if it can be done with a shufps. 2547 return HasHi; 2548} 2549 2550/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 2551/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 2552bool X86::isMOVSLDUPMask(SDNode *N) { 2553 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2554 2555 if (N->getNumOperands() != 4) 2556 return false; 2557 2558 // Expect 0, 0, 2, 2 2559 for (unsigned i = 0; i < 2; ++i) { 2560 SDOperand Arg = N->getOperand(i); 2561 if (Arg.getOpcode() == ISD::UNDEF) continue; 2562 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2563 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2564 if (Val != 0) return false; 2565 } 2566 2567 bool HasHi = false; 2568 for (unsigned i = 2; i < 4; ++i) { 2569 SDOperand Arg = N->getOperand(i); 2570 if (Arg.getOpcode() == ISD::UNDEF) continue; 2571 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2572 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2573 if (Val != 2) return false; 2574 HasHi = true; 2575 } 2576 2577 // Don't use movshdup if it can be done with a shufps. 2578 return HasHi; 2579} 2580 2581/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand 2582/// specifies a identity operation on the LHS or RHS. 2583static bool isIdentityMask(SDNode *N, bool RHS = false) { 2584 unsigned NumElems = N->getNumOperands(); 2585 for (unsigned i = 0; i < NumElems; ++i) 2586 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0))) 2587 return false; 2588 return true; 2589} 2590 2591/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2592/// a splat of a single element. 2593static bool isSplatMask(SDNode *N) { 2594 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2595 2596 // This is a splat operation if each element of the permute is the same, and 2597 // if the value doesn't reference the second vector. 2598 unsigned NumElems = N->getNumOperands(); 2599 SDOperand ElementBase; 2600 unsigned i = 0; 2601 for (; i != NumElems; ++i) { 2602 SDOperand Elt = N->getOperand(i); 2603 if (isa<ConstantSDNode>(Elt)) { 2604 ElementBase = Elt; 2605 break; 2606 } 2607 } 2608 2609 if (!ElementBase.Val) 2610 return false; 2611 2612 for (; i != NumElems; ++i) { 2613 SDOperand Arg = N->getOperand(i); 2614 if (Arg.getOpcode() == ISD::UNDEF) continue; 2615 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2616 if (Arg != ElementBase) return false; 2617 } 2618 2619 // Make sure it is a splat of the first vector operand. 2620 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 2621} 2622 2623/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 2624/// a splat of a single element and it's a 2 or 4 element mask. 2625bool X86::isSplatMask(SDNode *N) { 2626 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2627 2628 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 2629 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 2630 return false; 2631 return ::isSplatMask(N); 2632} 2633 2634/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 2635/// specifies a splat of zero element. 2636bool X86::isSplatLoMask(SDNode *N) { 2637 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2638 2639 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 2640 if (!isUndefOrEqual(N->getOperand(i), 0)) 2641 return false; 2642 return true; 2643} 2644 2645/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2646/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2647/// instructions. 2648unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2649 unsigned NumOperands = N->getNumOperands(); 2650 unsigned Shift = (NumOperands == 4) ? 2 : 1; 2651 unsigned Mask = 0; 2652 for (unsigned i = 0; i < NumOperands; ++i) { 2653 unsigned Val = 0; 2654 SDOperand Arg = N->getOperand(NumOperands-i-1); 2655 if (Arg.getOpcode() != ISD::UNDEF) 2656 Val = cast<ConstantSDNode>(Arg)->getValue(); 2657 if (Val >= NumOperands) Val -= NumOperands; 2658 Mask |= Val; 2659 if (i != NumOperands - 1) 2660 Mask <<= Shift; 2661 } 2662 2663 return Mask; 2664} 2665 2666/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2667/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2668/// instructions. 2669unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2670 unsigned Mask = 0; 2671 // 8 nodes, but we only care about the last 4. 2672 for (unsigned i = 7; i >= 4; --i) { 2673 unsigned Val = 0; 2674 SDOperand Arg = N->getOperand(i); 2675 if (Arg.getOpcode() != ISD::UNDEF) 2676 Val = cast<ConstantSDNode>(Arg)->getValue(); 2677 Mask |= (Val - 4); 2678 if (i != 4) 2679 Mask <<= 2; 2680 } 2681 2682 return Mask; 2683} 2684 2685/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2686/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2687/// instructions. 2688unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2689 unsigned Mask = 0; 2690 // 8 nodes, but we only care about the first 4. 2691 for (int i = 3; i >= 0; --i) { 2692 unsigned Val = 0; 2693 SDOperand Arg = N->getOperand(i); 2694 if (Arg.getOpcode() != ISD::UNDEF) 2695 Val = cast<ConstantSDNode>(Arg)->getValue(); 2696 Mask |= Val; 2697 if (i != 0) 2698 Mask <<= 2; 2699 } 2700 2701 return Mask; 2702} 2703 2704/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2705/// specifies a 8 element shuffle that can be broken into a pair of 2706/// PSHUFHW and PSHUFLW. 2707static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2708 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2709 2710 if (N->getNumOperands() != 8) 2711 return false; 2712 2713 // Lower quadword shuffled. 2714 for (unsigned i = 0; i != 4; ++i) { 2715 SDOperand Arg = N->getOperand(i); 2716 if (Arg.getOpcode() == ISD::UNDEF) continue; 2717 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2718 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2719 if (Val >= 4) 2720 return false; 2721 } 2722 2723 // Upper quadword shuffled. 2724 for (unsigned i = 4; i != 8; ++i) { 2725 SDOperand Arg = N->getOperand(i); 2726 if (Arg.getOpcode() == ISD::UNDEF) continue; 2727 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2728 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2729 if (Val < 4 || Val > 7) 2730 return false; 2731 } 2732 2733 return true; 2734} 2735 2736/// CommuteVectorShuffle - Swap vector_shuffle operands as well as 2737/// values in ther permute mask. 2738static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2739 SDOperand &V2, SDOperand &Mask, 2740 SelectionDAG &DAG) { 2741 MVT::ValueType VT = Op.getValueType(); 2742 MVT::ValueType MaskVT = Mask.getValueType(); 2743 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2744 unsigned NumElems = Mask.getNumOperands(); 2745 SmallVector<SDOperand, 8> MaskVec; 2746 2747 for (unsigned i = 0; i != NumElems; ++i) { 2748 SDOperand Arg = Mask.getOperand(i); 2749 if (Arg.getOpcode() == ISD::UNDEF) { 2750 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2751 continue; 2752 } 2753 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2754 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2755 if (Val < NumElems) 2756 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2757 else 2758 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2759 } 2760 2761 std::swap(V1, V2); 2762 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2763 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2764} 2765 2766/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2767/// the two vector operands have swapped position. 2768static 2769SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2770 MVT::ValueType MaskVT = Mask.getValueType(); 2771 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2772 unsigned NumElems = Mask.getNumOperands(); 2773 SmallVector<SDOperand, 8> MaskVec; 2774 for (unsigned i = 0; i != NumElems; ++i) { 2775 SDOperand Arg = Mask.getOperand(i); 2776 if (Arg.getOpcode() == ISD::UNDEF) { 2777 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2778 continue; 2779 } 2780 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2781 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2782 if (Val < NumElems) 2783 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2784 else 2785 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2786 } 2787 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2788} 2789 2790 2791/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2792/// match movhlps. The lower half elements should come from upper half of 2793/// V1 (and in order), and the upper half elements should come from the upper 2794/// half of V2 (and in order). 2795static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2796 unsigned NumElems = Mask->getNumOperands(); 2797 if (NumElems != 4) 2798 return false; 2799 for (unsigned i = 0, e = 2; i != e; ++i) 2800 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2801 return false; 2802 for (unsigned i = 2; i != 4; ++i) 2803 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2804 return false; 2805 return true; 2806} 2807 2808/// isScalarLoadToVector - Returns true if the node is a scalar load that 2809/// is promoted to a vector. 2810static inline bool isScalarLoadToVector(SDNode *N) { 2811 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2812 N = N->getOperand(0).Val; 2813 return ISD::isNON_EXTLoad(N); 2814 } 2815 return false; 2816} 2817 2818/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2819/// match movlp{s|d}. The lower half elements should come from lower half of 2820/// V1 (and in order), and the upper half elements should come from the upper 2821/// half of V2 (and in order). And since V1 will become the source of the 2822/// MOVLP, it must be either a vector load or a scalar load to vector. 2823static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 2824 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 2825 return false; 2826 // Is V2 is a vector load, don't do this transformation. We will try to use 2827 // load folding shufps op. 2828 if (ISD::isNON_EXTLoad(V2)) 2829 return false; 2830 2831 unsigned NumElems = Mask->getNumOperands(); 2832 if (NumElems != 2 && NumElems != 4) 2833 return false; 2834 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2835 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2836 return false; 2837 for (unsigned i = NumElems/2; i != NumElems; ++i) 2838 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2839 return false; 2840 return true; 2841} 2842 2843/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2844/// all the same. 2845static bool isSplatVector(SDNode *N) { 2846 if (N->getOpcode() != ISD::BUILD_VECTOR) 2847 return false; 2848 2849 SDOperand SplatValue = N->getOperand(0); 2850 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2851 if (N->getOperand(i) != SplatValue) 2852 return false; 2853 return true; 2854} 2855 2856/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2857/// to an undef. 2858static bool isUndefShuffle(SDNode *N) { 2859 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2860 return false; 2861 2862 SDOperand V1 = N->getOperand(0); 2863 SDOperand V2 = N->getOperand(1); 2864 SDOperand Mask = N->getOperand(2); 2865 unsigned NumElems = Mask.getNumOperands(); 2866 for (unsigned i = 0; i != NumElems; ++i) { 2867 SDOperand Arg = Mask.getOperand(i); 2868 if (Arg.getOpcode() != ISD::UNDEF) { 2869 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2870 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2871 return false; 2872 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2873 return false; 2874 } 2875 } 2876 return true; 2877} 2878 2879/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2880/// constant +0.0. 2881static inline bool isZeroNode(SDOperand Elt) { 2882 return ((isa<ConstantSDNode>(Elt) && 2883 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2884 (isa<ConstantFPSDNode>(Elt) && 2885 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 2886} 2887 2888/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2889/// to an zero vector. 2890static bool isZeroShuffle(SDNode *N) { 2891 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2892 return false; 2893 2894 SDOperand V1 = N->getOperand(0); 2895 SDOperand V2 = N->getOperand(1); 2896 SDOperand Mask = N->getOperand(2); 2897 unsigned NumElems = Mask.getNumOperands(); 2898 for (unsigned i = 0; i != NumElems; ++i) { 2899 SDOperand Arg = Mask.getOperand(i); 2900 if (Arg.getOpcode() == ISD::UNDEF) 2901 continue; 2902 2903 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2904 if (Idx < NumElems) { 2905 unsigned Opc = V1.Val->getOpcode(); 2906 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val)) 2907 continue; 2908 if (Opc != ISD::BUILD_VECTOR || 2909 !isZeroNode(V1.Val->getOperand(Idx))) 2910 return false; 2911 } else if (Idx >= NumElems) { 2912 unsigned Opc = V2.Val->getOpcode(); 2913 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val)) 2914 continue; 2915 if (Opc != ISD::BUILD_VECTOR || 2916 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2917 return false; 2918 } 2919 } 2920 return true; 2921} 2922 2923/// getZeroVector - Returns a vector of specified type with all zero elements. 2924/// 2925static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2926 assert(MVT::isVector(VT) && "Expected a vector type"); 2927 2928 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2929 // type. This ensures they get CSE'd. 2930 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); 2931 SDOperand Vec; 2932 if (MVT::getSizeInBits(VT) == 64) // MMX 2933 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2934 else // SSE 2935 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2936 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2937} 2938 2939/// getOnesVector - Returns a vector of specified type with all bits set. 2940/// 2941static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) { 2942 assert(MVT::isVector(VT) && "Expected a vector type"); 2943 2944 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 2945 // type. This ensures they get CSE'd. 2946 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); 2947 SDOperand Vec; 2948 if (MVT::getSizeInBits(VT) == 64) // MMX 2949 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); 2950 else // SSE 2951 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); 2952 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); 2953} 2954 2955 2956/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2957/// that point to V2 points to its first element. 2958static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2959 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2960 2961 bool Changed = false; 2962 SmallVector<SDOperand, 8> MaskVec; 2963 unsigned NumElems = Mask.getNumOperands(); 2964 for (unsigned i = 0; i != NumElems; ++i) { 2965 SDOperand Arg = Mask.getOperand(i); 2966 if (Arg.getOpcode() != ISD::UNDEF) { 2967 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2968 if (Val > NumElems) { 2969 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2970 Changed = true; 2971 } 2972 } 2973 MaskVec.push_back(Arg); 2974 } 2975 2976 if (Changed) 2977 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2978 &MaskVec[0], MaskVec.size()); 2979 return Mask; 2980} 2981 2982/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2983/// operation of specified width. 2984static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2985 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2986 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2987 2988 SmallVector<SDOperand, 8> MaskVec; 2989 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2990 for (unsigned i = 1; i != NumElems; ++i) 2991 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2992 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2993} 2994 2995/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2996/// of specified width. 2997static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2998 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2999 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 3000 SmallVector<SDOperand, 8> MaskVec; 3001 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 3002 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3003 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 3004 } 3005 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3006} 3007 3008/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 3009/// of specified width. 3010static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 3011 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3012 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 3013 unsigned Half = NumElems/2; 3014 SmallVector<SDOperand, 8> MaskVec; 3015 for (unsigned i = 0; i != Half; ++i) { 3016 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 3017 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 3018 } 3019 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 3020} 3021 3022/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 3023/// 3024static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 3025 SDOperand V1 = Op.getOperand(0); 3026 SDOperand Mask = Op.getOperand(2); 3027 MVT::ValueType VT = Op.getValueType(); 3028 unsigned NumElems = Mask.getNumOperands(); 3029 Mask = getUnpacklMask(NumElems, DAG); 3030 while (NumElems != 4) { 3031 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 3032 NumElems >>= 1; 3033 } 3034 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 3035 3036 Mask = getZeroVector(MVT::v4i32, DAG); 3037 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 3038 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 3039 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 3040} 3041 3042/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 3043/// vector of zero or undef vector. This produces a shuffle where the low 3044/// element of V2 is swizzled into the zero/undef vector, landing at element 3045/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 3046static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT, 3047 unsigned NumElems, unsigned Idx, 3048 bool isZero, SelectionDAG &DAG) { 3049 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 3050 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3051 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3052 SmallVector<SDOperand, 16> MaskVec; 3053 for (unsigned i = 0; i != NumElems; ++i) 3054 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. 3055 MaskVec.push_back(DAG.getConstant(NumElems, EVT)); 3056 else 3057 MaskVec.push_back(DAG.getConstant(i, EVT)); 3058 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3059 &MaskVec[0], MaskVec.size()); 3060 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3061} 3062 3063/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 3064/// 3065static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 3066 unsigned NumNonZero, unsigned NumZero, 3067 SelectionDAG &DAG, TargetLowering &TLI) { 3068 if (NumNonZero > 8) 3069 return SDOperand(); 3070 3071 SDOperand V(0, 0); 3072 bool First = true; 3073 for (unsigned i = 0; i < 16; ++i) { 3074 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 3075 if (ThisIsNonZero && First) { 3076 if (NumZero) 3077 V = getZeroVector(MVT::v8i16, DAG); 3078 else 3079 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3080 First = false; 3081 } 3082 3083 if ((i & 1) != 0) { 3084 SDOperand ThisElt(0, 0), LastElt(0, 0); 3085 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 3086 if (LastIsNonZero) { 3087 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 3088 } 3089 if (ThisIsNonZero) { 3090 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 3091 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 3092 ThisElt, DAG.getConstant(8, MVT::i8)); 3093 if (LastIsNonZero) 3094 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 3095 } else 3096 ThisElt = LastElt; 3097 3098 if (ThisElt.Val) 3099 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 3100 DAG.getConstant(i/2, TLI.getPointerTy())); 3101 } 3102 } 3103 3104 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 3105} 3106 3107/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 3108/// 3109static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 3110 unsigned NumNonZero, unsigned NumZero, 3111 SelectionDAG &DAG, TargetLowering &TLI) { 3112 if (NumNonZero > 4) 3113 return SDOperand(); 3114 3115 SDOperand V(0, 0); 3116 bool First = true; 3117 for (unsigned i = 0; i < 8; ++i) { 3118 bool isNonZero = (NonZeros & (1 << i)) != 0; 3119 if (isNonZero) { 3120 if (First) { 3121 if (NumZero) 3122 V = getZeroVector(MVT::v8i16, DAG); 3123 else 3124 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3125 First = false; 3126 } 3127 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 3128 DAG.getConstant(i, TLI.getPointerTy())); 3129 } 3130 } 3131 3132 return V; 3133} 3134 3135/// is4WideVector - Returns true if the specific v8i16 or v16i8 vector is 3136/// actually just a 4 wide vector. e.g. <a, a, y, y, d, d, x, x> 3137SDOperand 3138X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3139 // All zero's are handled with pxor, all one's are handled with pcmpeqd. 3140 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { 3141 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to 3142 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 3143 // eliminated on x86-32 hosts. 3144 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 3145 return Op; 3146 3147 if (ISD::isBuildVectorAllOnes(Op.Val)) 3148 return getOnesVector(Op.getValueType(), DAG); 3149 return getZeroVector(Op.getValueType(), DAG); 3150 } 3151 3152 MVT::ValueType VT = Op.getValueType(); 3153 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3154 unsigned EVTBits = MVT::getSizeInBits(EVT); 3155 3156 unsigned NumElems = Op.getNumOperands(); 3157 unsigned NumZero = 0; 3158 unsigned NumNonZero = 0; 3159 unsigned NonZeros = 0; 3160 unsigned NumNonZeroImms = 0; 3161 SmallSet<SDOperand, 8> Values; 3162 for (unsigned i = 0; i < NumElems; ++i) { 3163 SDOperand Elt = Op.getOperand(i); 3164 if (Elt.getOpcode() != ISD::UNDEF) { 3165 Values.insert(Elt); 3166 if (isZeroNode(Elt)) 3167 NumZero++; 3168 else { 3169 NonZeros |= (1 << i); 3170 NumNonZero++; 3171 if (Elt.getOpcode() == ISD::Constant || 3172 Elt.getOpcode() == ISD::ConstantFP) 3173 NumNonZeroImms++; 3174 } 3175 } 3176 } 3177 3178 if (NumNonZero == 0) { 3179 // All undef vector. Return an UNDEF. All zero vectors were handled above. 3180 return DAG.getNode(ISD::UNDEF, VT); 3181 } 3182 3183 // Splat is obviously ok. Let legalizer expand it to a shuffle. 3184 if (Values.size() == 1) 3185 return SDOperand(); 3186 3187 // Special case for single non-zero element. 3188 if (NumNonZero == 1) { 3189 unsigned Idx = CountTrailingZeros_32(NonZeros); 3190 SDOperand Item = Op.getOperand(Idx); 3191 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3192 if (Idx == 0) 3193 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 3194 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 3195 NumZero > 0, DAG); 3196 3197 if (EVTBits == 32) { 3198 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3199 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 3200 DAG); 3201 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3202 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3203 SmallVector<SDOperand, 8> MaskVec; 3204 for (unsigned i = 0; i < NumElems; i++) 3205 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3206 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3207 &MaskVec[0], MaskVec.size()); 3208 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3209 DAG.getNode(ISD::UNDEF, VT), Mask); 3210 } 3211 } 3212 3213 // A vector full of immediates; various special cases are already 3214 // handled, so this is best done with a single constant-pool load. 3215 if (NumNonZero == NumNonZeroImms) 3216 return SDOperand(); 3217 3218 // Let legalizer expand 2-wide build_vectors. 3219 if (EVTBits == 64) 3220 return SDOperand(); 3221 3222 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3223 if (EVTBits == 8 && NumElems == 16) { 3224 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3225 *this); 3226 if (V.Val) return V; 3227 } 3228 3229 if (EVTBits == 16 && NumElems == 8) { 3230 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3231 *this); 3232 if (V.Val) return V; 3233 } 3234 3235 // If element VT is == 32 bits, turn it into a number of shuffles. 3236 SmallVector<SDOperand, 8> V; 3237 V.resize(NumElems); 3238 if (NumElems == 4 && NumZero > 0) { 3239 for (unsigned i = 0; i < 4; ++i) { 3240 bool isZero = !(NonZeros & (1 << i)); 3241 if (isZero) 3242 V[i] = getZeroVector(VT, DAG); 3243 else 3244 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3245 } 3246 3247 for (unsigned i = 0; i < 2; ++i) { 3248 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3249 default: break; 3250 case 0: 3251 V[i] = V[i*2]; // Must be a zero vector. 3252 break; 3253 case 1: 3254 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3255 getMOVLMask(NumElems, DAG)); 3256 break; 3257 case 2: 3258 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3259 getMOVLMask(NumElems, DAG)); 3260 break; 3261 case 3: 3262 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3263 getUnpacklMask(NumElems, DAG)); 3264 break; 3265 } 3266 } 3267 3268 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 3269 // clears the upper bits. 3270 // FIXME: we can do the same for v4f32 case when we know both parts of 3271 // the lower half come from scalar_to_vector (loadf32). We should do 3272 // that in post legalizer dag combiner with target specific hooks. 3273 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3274 return V[0]; 3275 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3276 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 3277 SmallVector<SDOperand, 8> MaskVec; 3278 bool Reverse = (NonZeros & 0x3) == 2; 3279 for (unsigned i = 0; i < 2; ++i) 3280 if (Reverse) 3281 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 3282 else 3283 MaskVec.push_back(DAG.getConstant(i, EVT)); 3284 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3285 for (unsigned i = 0; i < 2; ++i) 3286 if (Reverse) 3287 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 3288 else 3289 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 3290 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3291 &MaskVec[0], MaskVec.size()); 3292 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3293 } 3294 3295 if (Values.size() > 2) { 3296 // Expand into a number of unpckl*. 3297 // e.g. for v4f32 3298 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3299 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3300 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3301 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3302 for (unsigned i = 0; i < NumElems; ++i) 3303 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3304 NumElems >>= 1; 3305 while (NumElems != 0) { 3306 for (unsigned i = 0; i < NumElems; ++i) 3307 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3308 UnpckMask); 3309 NumElems >>= 1; 3310 } 3311 return V[0]; 3312 } 3313 3314 return SDOperand(); 3315} 3316 3317static 3318SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, 3319 SDOperand PermMask, SelectionDAG &DAG, 3320 TargetLowering &TLI) { 3321 SDOperand NewV; 3322 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8); 3323 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3324 MVT::ValueType PtrVT = TLI.getPointerTy(); 3325 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), 3326 PermMask.Val->op_end()); 3327 3328 // First record which half of which vector the low elements come from. 3329 SmallVector<unsigned, 4> LowQuad(4); 3330 for (unsigned i = 0; i < 4; ++i) { 3331 SDOperand Elt = MaskElts[i]; 3332 if (Elt.getOpcode() == ISD::UNDEF) 3333 continue; 3334 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3335 int QuadIdx = EltIdx / 4; 3336 ++LowQuad[QuadIdx]; 3337 } 3338 int BestLowQuad = -1; 3339 unsigned MaxQuad = 1; 3340 for (unsigned i = 0; i < 4; ++i) { 3341 if (LowQuad[i] > MaxQuad) { 3342 BestLowQuad = i; 3343 MaxQuad = LowQuad[i]; 3344 } 3345 } 3346 3347 // Record which half of which vector the high elements come from. 3348 SmallVector<unsigned, 4> HighQuad(4); 3349 for (unsigned i = 4; i < 8; ++i) { 3350 SDOperand Elt = MaskElts[i]; 3351 if (Elt.getOpcode() == ISD::UNDEF) 3352 continue; 3353 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3354 int QuadIdx = EltIdx / 4; 3355 ++HighQuad[QuadIdx]; 3356 } 3357 int BestHighQuad = -1; 3358 MaxQuad = 1; 3359 for (unsigned i = 0; i < 4; ++i) { 3360 if (HighQuad[i] > MaxQuad) { 3361 BestHighQuad = i; 3362 MaxQuad = HighQuad[i]; 3363 } 3364 } 3365 3366 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3367 if (BestLowQuad != -1 || BestHighQuad != -1) { 3368 // First sort the 4 chunks in order using shufpd. 3369 SmallVector<SDOperand, 8> MaskVec; 3370 if (BestLowQuad != -1) 3371 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3372 else 3373 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3374 if (BestHighQuad != -1) 3375 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3376 else 3377 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3378 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3379 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3380 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3381 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3382 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3383 3384 // Now sort high and low parts separately. 3385 BitVector InOrder(8); 3386 if (BestLowQuad != -1) { 3387 // Sort lower half in order using PSHUFLW. 3388 MaskVec.clear(); 3389 bool AnyOutOrder = false; 3390 for (unsigned i = 0; i != 4; ++i) { 3391 SDOperand Elt = MaskElts[i]; 3392 if (Elt.getOpcode() == ISD::UNDEF) { 3393 MaskVec.push_back(Elt); 3394 InOrder.set(i); 3395 } else { 3396 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3397 if (EltIdx != i) 3398 AnyOutOrder = true; 3399 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3400 // If this element is in the right place after this shuffle, then 3401 // remember it. 3402 if ((int)(EltIdx / 4) == BestLowQuad) 3403 InOrder.set(i); 3404 } 3405 } 3406 if (AnyOutOrder) { 3407 for (unsigned i = 4; i != 8; ++i) 3408 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3409 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3410 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3411 } 3412 } 3413 3414 if (BestHighQuad != -1) { 3415 // Sort high half in order using PSHUFHW if possible. 3416 MaskVec.clear(); 3417 for (unsigned i = 0; i != 4; ++i) 3418 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3419 bool AnyOutOrder = false; 3420 for (unsigned i = 4; i != 8; ++i) { 3421 SDOperand Elt = MaskElts[i]; 3422 if (Elt.getOpcode() == ISD::UNDEF) { 3423 MaskVec.push_back(Elt); 3424 InOrder.set(i); 3425 } else { 3426 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3427 if (EltIdx != i) 3428 AnyOutOrder = true; 3429 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT)); 3430 // If this element is in the right place after this shuffle, then 3431 // remember it. 3432 if ((int)(EltIdx / 4) == BestHighQuad) 3433 InOrder.set(i); 3434 } 3435 } 3436 if (AnyOutOrder) { 3437 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3438 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3439 } 3440 } 3441 3442 // The other elements are put in the right place using pextrw and pinsrw. 3443 for (unsigned i = 0; i != 8; ++i) { 3444 if (InOrder[i]) 3445 continue; 3446 SDOperand Elt = MaskElts[i]; 3447 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3448 if (EltIdx == i) 3449 continue; 3450 SDOperand ExtOp = (EltIdx < 8) 3451 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3452 DAG.getConstant(EltIdx, PtrVT)) 3453 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3454 DAG.getConstant(EltIdx - 8, PtrVT)); 3455 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3456 DAG.getConstant(i, PtrVT)); 3457 } 3458 return NewV; 3459 } 3460 3461 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use 3462 ///as few as possible. 3463 // First, let's find out how many elements are already in the right order. 3464 unsigned V1InOrder = 0; 3465 unsigned V1FromV1 = 0; 3466 unsigned V2InOrder = 0; 3467 unsigned V2FromV2 = 0; 3468 SmallVector<SDOperand, 8> V1Elts; 3469 SmallVector<SDOperand, 8> V2Elts; 3470 for (unsigned i = 0; i < 8; ++i) { 3471 SDOperand Elt = MaskElts[i]; 3472 if (Elt.getOpcode() == ISD::UNDEF) { 3473 V1Elts.push_back(Elt); 3474 V2Elts.push_back(Elt); 3475 ++V1InOrder; 3476 ++V2InOrder; 3477 continue; 3478 } 3479 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3480 if (EltIdx == i) { 3481 V1Elts.push_back(Elt); 3482 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT)); 3483 ++V1InOrder; 3484 } else if (EltIdx == i+8) { 3485 V1Elts.push_back(Elt); 3486 V2Elts.push_back(DAG.getConstant(i, MaskEVT)); 3487 ++V2InOrder; 3488 } else if (EltIdx < 8) { 3489 V1Elts.push_back(Elt); 3490 ++V1FromV1; 3491 } else { 3492 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT)); 3493 ++V2FromV2; 3494 } 3495 } 3496 3497 if (V2InOrder > V1InOrder) { 3498 PermMask = CommuteVectorShuffleMask(PermMask, DAG); 3499 std::swap(V1, V2); 3500 std::swap(V1Elts, V2Elts); 3501 std::swap(V1FromV1, V2FromV2); 3502 } 3503 3504 if ((V1FromV1 + V1InOrder) != 8) { 3505 // Some elements are from V2. 3506 if (V1FromV1) { 3507 // If there are elements that are from V1 but out of place, 3508 // then first sort them in place 3509 SmallVector<SDOperand, 8> MaskVec; 3510 for (unsigned i = 0; i < 8; ++i) { 3511 SDOperand Elt = V1Elts[i]; 3512 if (Elt.getOpcode() == ISD::UNDEF) { 3513 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3514 continue; 3515 } 3516 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3517 if (EltIdx >= 8) 3518 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3519 else 3520 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); 3521 } 3522 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3523 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); 3524 } 3525 3526 NewV = V1; 3527 for (unsigned i = 0; i < 8; ++i) { 3528 SDOperand Elt = V1Elts[i]; 3529 if (Elt.getOpcode() == ISD::UNDEF) 3530 continue; 3531 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3532 if (EltIdx < 8) 3533 continue; 3534 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, 3535 DAG.getConstant(EltIdx - 8, PtrVT)); 3536 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3537 DAG.getConstant(i, PtrVT)); 3538 } 3539 return NewV; 3540 } else { 3541 // All elements are from V1. 3542 NewV = V1; 3543 for (unsigned i = 0; i < 8; ++i) { 3544 SDOperand Elt = V1Elts[i]; 3545 if (Elt.getOpcode() == ISD::UNDEF) 3546 continue; 3547 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3548 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, 3549 DAG.getConstant(EltIdx, PtrVT)); 3550 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, 3551 DAG.getConstant(i, PtrVT)); 3552 } 3553 return NewV; 3554 } 3555} 3556 3557/// RewriteAs4WideShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 3558/// ones if possible. This can be done when every pair / quad of shuffle mask 3559/// elements point to elements in the right sequence. e.g. 3560/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> 3561static 3562SDOperand RewriteAs4WideShuffle(SDOperand V1, SDOperand V2, 3563 SDOperand PermMask, SelectionDAG &DAG, 3564 TargetLowering &TLI) { 3565 unsigned NumElems = PermMask.getNumOperands(); 3566 unsigned Scale = NumElems / 4; 3567 SmallVector<SDOperand, 4> MaskVec; 3568 for (unsigned i = 0; i < NumElems; i += Scale) { 3569 unsigned StartIdx = ~0U; 3570 for (unsigned j = 0; j < Scale; ++j) { 3571 SDOperand Elt = PermMask.getOperand(i+j); 3572 if (Elt.getOpcode() == ISD::UNDEF) 3573 continue; 3574 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3575 if (StartIdx == ~0U) 3576 StartIdx = EltIdx - (EltIdx % Scale); 3577 if (EltIdx != StartIdx + j) 3578 return SDOperand(); 3579 } 3580 if (StartIdx == ~0U) 3581 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3582 else 3583 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3584 } 3585 3586 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 3587 V2 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V2); 3588 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, V2, 3589 DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, &MaskVec[0],4)); 3590} 3591 3592SDOperand 3593X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3594 SDOperand V1 = Op.getOperand(0); 3595 SDOperand V2 = Op.getOperand(1); 3596 SDOperand PermMask = Op.getOperand(2); 3597 MVT::ValueType VT = Op.getValueType(); 3598 unsigned NumElems = PermMask.getNumOperands(); 3599 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3600 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3601 bool V1IsSplat = false; 3602 bool V2IsSplat = false; 3603 3604 if (isUndefShuffle(Op.Val)) 3605 return DAG.getNode(ISD::UNDEF, VT); 3606 3607 if (isZeroShuffle(Op.Val)) 3608 return getZeroVector(VT, DAG); 3609 3610 if (isIdentityMask(PermMask.Val)) 3611 return V1; 3612 else if (isIdentityMask(PermMask.Val, true)) 3613 return V2; 3614 3615 if (isSplatMask(PermMask.Val)) { 3616 if (NumElems <= 4) return Op; 3617 // Promote it to a v4i32 splat. 3618 return PromoteSplat(Op, DAG); 3619 } 3620 3621 if (X86::isMOVLMask(PermMask.Val)) 3622 return (V1IsUndef) ? V2 : Op; 3623 3624 if (X86::isMOVSHDUPMask(PermMask.Val) || 3625 X86::isMOVSLDUPMask(PermMask.Val) || 3626 X86::isMOVHLPSMask(PermMask.Val) || 3627 X86::isMOVHPMask(PermMask.Val) || 3628 X86::isMOVLPMask(PermMask.Val)) 3629 return Op; 3630 3631 if (ShouldXformToMOVHLPS(PermMask.Val) || 3632 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3633 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3634 3635 bool Commuted = false; 3636 // FIXME: This should also accept a bitcast of a splat? Be careful, not 3637 // 1,1,1,1 -> v8i16 though. 3638 V1IsSplat = isSplatVector(V1.Val); 3639 V2IsSplat = isSplatVector(V2.Val); 3640 3641 // Canonicalize the splat or undef, if present, to be on the RHS. 3642 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3643 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3644 std::swap(V1IsSplat, V2IsSplat); 3645 std::swap(V1IsUndef, V2IsUndef); 3646 Commuted = true; 3647 } 3648 3649 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3650 if (V2IsUndef) return V1; 3651 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3652 if (V2IsSplat) { 3653 // V2 is a splat, so the mask may be malformed. That is, it may point 3654 // to any V2 element. The instruction selectior won't like this. Get 3655 // a corrected mask and commute to form a proper MOVS{S|D}. 3656 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3657 if (NewMask.Val != PermMask.Val) 3658 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3659 } 3660 return Op; 3661 } 3662 3663 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3664 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3665 X86::isUNPCKLMask(PermMask.Val) || 3666 X86::isUNPCKHMask(PermMask.Val)) 3667 return Op; 3668 3669 if (V2IsSplat) { 3670 // Normalize mask so all entries that point to V2 points to its first 3671 // element then try to match unpck{h|l} again. If match, return a 3672 // new vector_shuffle with the corrected mask. 3673 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3674 if (NewMask.Val != PermMask.Val) { 3675 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3676 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3677 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3678 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3679 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3680 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3681 } 3682 } 3683 } 3684 3685 // Normalize the node to match x86 shuffle ops if needed 3686 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3687 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3688 3689 if (Commuted) { 3690 // Commute is back and try unpck* again. 3691 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3692 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3693 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3694 X86::isUNPCKLMask(PermMask.Val) || 3695 X86::isUNPCKHMask(PermMask.Val)) 3696 return Op; 3697 } 3698 3699 // If VT is integer, try PSHUF* first, then SHUFP*. 3700 if (MVT::isInteger(VT)) { 3701 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3702 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3703 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3704 X86::isPSHUFDMask(PermMask.Val)) || 3705 X86::isPSHUFHWMask(PermMask.Val) || 3706 X86::isPSHUFLWMask(PermMask.Val)) { 3707 if (V2.getOpcode() != ISD::UNDEF) 3708 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3709 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3710 return Op; 3711 } 3712 3713 if (X86::isSHUFPMask(PermMask.Val) && 3714 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3715 return Op; 3716 } else { 3717 // Floating point cases in the other order. 3718 if (X86::isSHUFPMask(PermMask.Val)) 3719 return Op; 3720 if (X86::isPSHUFDMask(PermMask.Val) || 3721 X86::isPSHUFHWMask(PermMask.Val) || 3722 X86::isPSHUFLWMask(PermMask.Val)) { 3723 if (V2.getOpcode() != ISD::UNDEF) 3724 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3725 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3726 return Op; 3727 } 3728 } 3729 3730 // If the shuffle can be rewritten as a 4 wide shuffle, then do it! 3731 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3732 SDOperand NewOp = RewriteAs4WideShuffle(V1, V2, PermMask, DAG, *this); 3733 if (NewOp.Val) 3734 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3735 } 3736 3737 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 3738 if (VT == MVT::v8i16) { 3739 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); 3740 if (NewOp.Val) 3741 return NewOp; 3742 } 3743 3744 // Handle all 4 wide cases with a number of shuffles. 3745 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) { 3746 // Don't do this for MMX. 3747 MVT::ValueType MaskVT = PermMask.getValueType(); 3748 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3749 SmallVector<std::pair<int, int>, 8> Locs; 3750 Locs.reserve(NumElems); 3751 SmallVector<SDOperand, 8> Mask1(NumElems, 3752 DAG.getNode(ISD::UNDEF, MaskEVT)); 3753 SmallVector<SDOperand, 8> Mask2(NumElems, 3754 DAG.getNode(ISD::UNDEF, MaskEVT)); 3755 unsigned NumHi = 0; 3756 unsigned NumLo = 0; 3757 // If no more than two elements come from either vector. This can be 3758 // implemented with two shuffles. First shuffle gather the elements. 3759 // The second shuffle, which takes the first shuffle as both of its 3760 // vector operands, put the elements into the right order. 3761 for (unsigned i = 0; i != NumElems; ++i) { 3762 SDOperand Elt = PermMask.getOperand(i); 3763 if (Elt.getOpcode() == ISD::UNDEF) { 3764 Locs[i] = std::make_pair(-1, -1); 3765 } else { 3766 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3767 if (Val < NumElems) { 3768 Locs[i] = std::make_pair(0, NumLo); 3769 Mask1[NumLo] = Elt; 3770 NumLo++; 3771 } else { 3772 Locs[i] = std::make_pair(1, NumHi); 3773 if (2+NumHi < NumElems) 3774 Mask1[2+NumHi] = Elt; 3775 NumHi++; 3776 } 3777 } 3778 } 3779 if (NumLo <= 2 && NumHi <= 2) { 3780 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3781 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3782 &Mask1[0], Mask1.size())); 3783 for (unsigned i = 0; i != NumElems; ++i) { 3784 if (Locs[i].first == -1) 3785 continue; 3786 else { 3787 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3788 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3789 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3790 } 3791 } 3792 3793 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3794 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3795 &Mask2[0], Mask2.size())); 3796 } 3797 3798 // Break it into (shuffle shuffle_hi, shuffle_lo). 3799 Locs.clear(); 3800 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3801 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3802 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3803 unsigned MaskIdx = 0; 3804 unsigned LoIdx = 0; 3805 unsigned HiIdx = NumElems/2; 3806 for (unsigned i = 0; i != NumElems; ++i) { 3807 if (i == NumElems/2) { 3808 MaskPtr = &HiMask; 3809 MaskIdx = 1; 3810 LoIdx = 0; 3811 HiIdx = NumElems/2; 3812 } 3813 SDOperand Elt = PermMask.getOperand(i); 3814 if (Elt.getOpcode() == ISD::UNDEF) { 3815 Locs[i] = std::make_pair(-1, -1); 3816 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3817 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3818 (*MaskPtr)[LoIdx] = Elt; 3819 LoIdx++; 3820 } else { 3821 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3822 (*MaskPtr)[HiIdx] = Elt; 3823 HiIdx++; 3824 } 3825 } 3826 3827 SDOperand LoShuffle = 3828 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3829 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3830 &LoMask[0], LoMask.size())); 3831 SDOperand HiShuffle = 3832 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3833 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3834 &HiMask[0], HiMask.size())); 3835 SmallVector<SDOperand, 8> MaskOps; 3836 for (unsigned i = 0; i != NumElems; ++i) { 3837 if (Locs[i].first == -1) { 3838 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3839 } else { 3840 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3841 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3842 } 3843 } 3844 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3845 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3846 &MaskOps[0], MaskOps.size())); 3847 } 3848 3849 return SDOperand(); 3850} 3851 3852SDOperand 3853X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3854 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3855 return SDOperand(); 3856 3857 MVT::ValueType VT = Op.getValueType(); 3858 // TODO: handle v16i8. 3859 if (MVT::getSizeInBits(VT) == 16) { 3860 SDOperand Vec = Op.getOperand(0); 3861 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3862 if (Idx == 0) 3863 return DAG.getNode(ISD::TRUNCATE, MVT::i16, 3864 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, 3865 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec), 3866 Op.getOperand(1))); 3867 // Transform it so it match pextrw which produces a 32-bit result. 3868 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3869 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3870 Op.getOperand(0), Op.getOperand(1)); 3871 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3872 DAG.getValueType(VT)); 3873 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3874 } else if (MVT::getSizeInBits(VT) == 32) { 3875 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3876 if (Idx == 0) 3877 return Op; 3878 // SHUFPS the element to the lowest double word, then movss. 3879 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3880 SmallVector<SDOperand, 8> IdxVec; 3881 IdxVec. 3882 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3883 IdxVec. 3884 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3885 IdxVec. 3886 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3887 IdxVec. 3888 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3889 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3890 &IdxVec[0], IdxVec.size()); 3891 SDOperand Vec = Op.getOperand(0); 3892 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3893 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3894 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3895 DAG.getConstant(0, getPointerTy())); 3896 } else if (MVT::getSizeInBits(VT) == 64) { 3897 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3898 if (Idx == 0) 3899 return Op; 3900 3901 // UNPCKHPD the element to the lowest double word, then movsd. 3902 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 3903 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 3904 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3905 SmallVector<SDOperand, 8> IdxVec; 3906 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3907 IdxVec. 3908 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3909 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3910 &IdxVec[0], IdxVec.size()); 3911 SDOperand Vec = Op.getOperand(0); 3912 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3913 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3914 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3915 DAG.getConstant(0, getPointerTy())); 3916 } 3917 3918 return SDOperand(); 3919} 3920 3921SDOperand 3922X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3923 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 3924 // as its second argument. 3925 MVT::ValueType VT = Op.getValueType(); 3926 MVT::ValueType BaseVT = MVT::getVectorElementType(VT); 3927 SDOperand N0 = Op.getOperand(0); 3928 SDOperand N1 = Op.getOperand(1); 3929 SDOperand N2 = Op.getOperand(2); 3930 if (MVT::getSizeInBits(BaseVT) == 16) { 3931 if (N1.getValueType() != MVT::i32) 3932 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3933 if (N2.getValueType() != MVT::i32) 3934 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),getPointerTy()); 3935 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3936 } else if (MVT::getSizeInBits(BaseVT) == 32) { 3937 unsigned Idx = cast<ConstantSDNode>(N2)->getValue(); 3938 if (Idx == 0) { 3939 // Use a movss. 3940 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1); 3941 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3942 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 3943 SmallVector<SDOperand, 8> MaskVec; 3944 MaskVec.push_back(DAG.getConstant(4, BaseVT)); 3945 for (unsigned i = 1; i <= 3; ++i) 3946 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3947 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1, 3948 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3949 &MaskVec[0], MaskVec.size())); 3950 } else { 3951 // Use two pinsrw instructions to insert a 32 bit value. 3952 Idx <<= 1; 3953 if (MVT::isFloatingPoint(N1.getValueType())) { 3954 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1); 3955 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1); 3956 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1, 3957 DAG.getConstant(0, getPointerTy())); 3958 } 3959 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0); 3960 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3961 DAG.getConstant(Idx, getPointerTy())); 3962 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8)); 3963 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3964 DAG.getConstant(Idx+1, getPointerTy())); 3965 return DAG.getNode(ISD::BIT_CONVERT, VT, N0); 3966 } 3967 } 3968 3969 return SDOperand(); 3970} 3971 3972SDOperand 3973X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3974 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3975 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3976} 3977 3978// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3979// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 3980// one of the above mentioned nodes. It has to be wrapped because otherwise 3981// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3982// be used to form addressing mode. These wrapped nodes will be selected 3983// into MOV32ri. 3984SDOperand 3985X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3986 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3987 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3988 getPointerTy(), 3989 CP->getAlignment()); 3990 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3991 // With PIC, the address is actually $g + Offset. 3992 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3993 !Subtarget->isPICStyleRIPRel()) { 3994 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3995 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3996 Result); 3997 } 3998 3999 return Result; 4000} 4001 4002SDOperand 4003X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 4004 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 4005 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 4006 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4007 // With PIC, the address is actually $g + Offset. 4008 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4009 !Subtarget->isPICStyleRIPRel()) { 4010 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4011 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4012 Result); 4013 } 4014 4015 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 4016 // load the value at address GV, not the value of GV itself. This means that 4017 // the GlobalAddress must be in the base or index register of the address, not 4018 // the GV offset field. Platform check is inside GVRequiresExtraLoad() call 4019 // The same applies for external symbols during PIC codegen 4020 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 4021 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 4022 4023 return Result; 4024} 4025 4026// Lower ISD::GlobalTLSAddress using the "general dynamic" model 4027static SDOperand 4028LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4029 const MVT::ValueType PtrVT) { 4030 SDOperand InFlag; 4031 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 4032 DAG.getNode(X86ISD::GlobalBaseReg, 4033 PtrVT), InFlag); 4034 InFlag = Chain.getValue(1); 4035 4036 // emit leal symbol@TLSGD(,%ebx,1), %eax 4037 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 4038 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4039 GA->getValueType(0), 4040 GA->getOffset()); 4041 SDOperand Ops[] = { Chain, TGA, InFlag }; 4042 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 4043 InFlag = Result.getValue(2); 4044 Chain = Result.getValue(1); 4045 4046 // call ___tls_get_addr. This function receives its argument in 4047 // the register EAX. 4048 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 4049 InFlag = Chain.getValue(1); 4050 4051 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4052 SDOperand Ops1[] = { Chain, 4053 DAG.getTargetExternalSymbol("___tls_get_addr", 4054 PtrVT), 4055 DAG.getRegister(X86::EAX, PtrVT), 4056 DAG.getRegister(X86::EBX, PtrVT), 4057 InFlag }; 4058 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 4059 InFlag = Chain.getValue(1); 4060 4061 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 4062} 4063 4064// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 4065// "local exec" model. 4066static SDOperand 4067LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 4068 const MVT::ValueType PtrVT) { 4069 // Get the Thread Pointer 4070 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 4071 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 4072 // exec) 4073 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 4074 GA->getValueType(0), 4075 GA->getOffset()); 4076 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 4077 4078 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 4079 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0); 4080 4081 // The address of the thread local variable is the add of the thread 4082 // pointer with the offset of the variable. 4083 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 4084} 4085 4086SDOperand 4087X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 4088 // TODO: implement the "local dynamic" model 4089 // TODO: implement the "initial exec"model for pic executables 4090 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 4091 "TLS not implemented for non-ELF and 64-bit targets"); 4092 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 4093 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 4094 // otherwise use the "Local Exec"TLS Model 4095 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 4096 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 4097 else 4098 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 4099} 4100 4101SDOperand 4102X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 4103 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 4104 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 4105 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4106 // With PIC, the address is actually $g + Offset. 4107 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4108 !Subtarget->isPICStyleRIPRel()) { 4109 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4110 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4111 Result); 4112 } 4113 4114 return Result; 4115} 4116 4117SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 4118 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 4119 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 4120 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 4121 // With PIC, the address is actually $g + Offset. 4122 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 4123 !Subtarget->isPICStyleRIPRel()) { 4124 Result = DAG.getNode(ISD::ADD, getPointerTy(), 4125 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 4126 Result); 4127 } 4128 4129 return Result; 4130} 4131 4132/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4133/// take a 2 x i32 value to shift plus a shift amount. 4134SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4135 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 4136 "Not an i64 shift!"); 4137 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4138 SDOperand ShOpLo = Op.getOperand(0); 4139 SDOperand ShOpHi = Op.getOperand(1); 4140 SDOperand ShAmt = Op.getOperand(2); 4141 SDOperand Tmp1 = isSRA ? 4142 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 4143 DAG.getConstant(0, MVT::i32); 4144 4145 SDOperand Tmp2, Tmp3; 4146 if (Op.getOpcode() == ISD::SHL_PARTS) { 4147 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 4148 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 4149 } else { 4150 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 4151 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 4152 } 4153 4154 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4155 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4156 DAG.getConstant(32, MVT::i8)); 4157 SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, 4158 AndNode, DAG.getConstant(0, MVT::i8)); 4159 4160 SDOperand Hi, Lo; 4161 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4162 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 4163 SmallVector<SDOperand, 4> Ops; 4164 if (Op.getOpcode() == ISD::SHL_PARTS) { 4165 Ops.push_back(Tmp2); 4166 Ops.push_back(Tmp3); 4167 Ops.push_back(CC); 4168 Ops.push_back(Cond); 4169 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4170 4171 Ops.clear(); 4172 Ops.push_back(Tmp3); 4173 Ops.push_back(Tmp1); 4174 Ops.push_back(CC); 4175 Ops.push_back(Cond); 4176 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4177 } else { 4178 Ops.push_back(Tmp2); 4179 Ops.push_back(Tmp3); 4180 Ops.push_back(CC); 4181 Ops.push_back(Cond); 4182 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4183 4184 Ops.clear(); 4185 Ops.push_back(Tmp3); 4186 Ops.push_back(Tmp1); 4187 Ops.push_back(CC); 4188 Ops.push_back(Cond); 4189 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4190 } 4191 4192 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 4193 Ops.clear(); 4194 Ops.push_back(Lo); 4195 Ops.push_back(Hi); 4196 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4197} 4198 4199SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4200 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 4201 Op.getOperand(0).getValueType() >= MVT::i16 && 4202 "Unknown SINT_TO_FP to lower!"); 4203 4204 SDOperand Result; 4205 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4206 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4207 MachineFunction &MF = DAG.getMachineFunction(); 4208 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4209 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4210 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4211 StackSlot, NULL, 0); 4212 4213 // These are really Legal; caller falls through into that case. 4214 if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32) 4215 return Result; 4216 if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64) 4217 return Result; 4218 if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 && 4219 Subtarget->is64Bit()) 4220 return Result; 4221 4222 // Build the FILD 4223 SDVTList Tys; 4224 bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) || 4225 (X86ScalarSSEf64 && Op.getValueType() == MVT::f64); 4226 if (useSSE) 4227 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4228 else 4229 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4230 SmallVector<SDOperand, 8> Ops; 4231 Ops.push_back(Chain); 4232 Ops.push_back(StackSlot); 4233 Ops.push_back(DAG.getValueType(SrcVT)); 4234 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 4235 Tys, &Ops[0], Ops.size()); 4236 4237 if (useSSE) { 4238 Chain = Result.getValue(1); 4239 SDOperand InFlag = Result.getValue(2); 4240 4241 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4242 // shouldn't be necessary except that RFP cannot be live across 4243 // multiple blocks. When stackifier is fixed, they can be uncoupled. 4244 MachineFunction &MF = DAG.getMachineFunction(); 4245 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4246 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4247 Tys = DAG.getVTList(MVT::Other); 4248 SmallVector<SDOperand, 8> Ops; 4249 Ops.push_back(Chain); 4250 Ops.push_back(Result); 4251 Ops.push_back(StackSlot); 4252 Ops.push_back(DAG.getValueType(Op.getValueType())); 4253 Ops.push_back(InFlag); 4254 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4255 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 4256 } 4257 4258 return Result; 4259} 4260 4261std::pair<SDOperand,SDOperand> X86TargetLowering:: 4262FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4263 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4264 "Unknown FP_TO_SINT to lower!"); 4265 4266 // These are really Legal. 4267 if (Op.getValueType() == MVT::i32 && 4268 X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) 4269 return std::make_pair(SDOperand(), SDOperand()); 4270 if (Op.getValueType() == MVT::i32 && 4271 X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64) 4272 return std::make_pair(SDOperand(), SDOperand()); 4273 if (Subtarget->is64Bit() && 4274 Op.getValueType() == MVT::i64 && 4275 Op.getOperand(0).getValueType() != MVT::f80) 4276 return std::make_pair(SDOperand(), SDOperand()); 4277 4278 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4279 // stack slot. 4280 MachineFunction &MF = DAG.getMachineFunction(); 4281 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4282 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4283 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4284 unsigned Opc; 4285 switch (Op.getValueType()) { 4286 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4287 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4288 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4289 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4290 } 4291 4292 SDOperand Chain = DAG.getEntryNode(); 4293 SDOperand Value = Op.getOperand(0); 4294 if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) || 4295 (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) { 4296 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4297 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 4298 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4299 SDOperand Ops[] = { 4300 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4301 }; 4302 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4303 Chain = Value.getValue(1); 4304 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4305 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4306 } 4307 4308 // Build the FP_TO_INT*_IN_MEM 4309 SDOperand Ops[] = { Chain, Value, StackSlot }; 4310 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4311 4312 return std::make_pair(FIST, StackSlot); 4313} 4314 4315SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4316 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4317 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4318 if (FIST.Val == 0) return SDOperand(); 4319 4320 // Load the result. 4321 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4322} 4323 4324SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4325 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4326 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4327 if (FIST.Val == 0) return 0; 4328 4329 // Return an i64 load from the stack slot. 4330 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4331 4332 // Use a MERGE_VALUES node to drop the chain result value. 4333 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4334} 4335 4336SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4337 MVT::ValueType VT = Op.getValueType(); 4338 MVT::ValueType EltVT = VT; 4339 if (MVT::isVector(VT)) 4340 EltVT = MVT::getVectorElementType(VT); 4341 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4342 std::vector<Constant*> CV; 4343 if (EltVT == MVT::f64) { 4344 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4345 CV.push_back(C); 4346 CV.push_back(C); 4347 } else { 4348 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4349 CV.push_back(C); 4350 CV.push_back(C); 4351 CV.push_back(C); 4352 CV.push_back(C); 4353 } 4354 Constant *C = ConstantVector::get(CV); 4355 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4356 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4357 false, 16); 4358 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4359} 4360 4361SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4362 MVT::ValueType VT = Op.getValueType(); 4363 MVT::ValueType EltVT = VT; 4364 unsigned EltNum = 1; 4365 if (MVT::isVector(VT)) { 4366 EltVT = MVT::getVectorElementType(VT); 4367 EltNum = MVT::getVectorNumElements(VT); 4368 } 4369 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4370 std::vector<Constant*> CV; 4371 if (EltVT == MVT::f64) { 4372 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4373 CV.push_back(C); 4374 CV.push_back(C); 4375 } else { 4376 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4377 CV.push_back(C); 4378 CV.push_back(C); 4379 CV.push_back(C); 4380 CV.push_back(C); 4381 } 4382 Constant *C = ConstantVector::get(CV); 4383 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4384 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4385 false, 16); 4386 if (MVT::isVector(VT)) { 4387 return DAG.getNode(ISD::BIT_CONVERT, VT, 4388 DAG.getNode(ISD::XOR, MVT::v2i64, 4389 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4390 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4391 } else { 4392 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4393 } 4394} 4395 4396SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4397 SDOperand Op0 = Op.getOperand(0); 4398 SDOperand Op1 = Op.getOperand(1); 4399 MVT::ValueType VT = Op.getValueType(); 4400 MVT::ValueType SrcVT = Op1.getValueType(); 4401 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4402 4403 // If second operand is smaller, extend it first. 4404 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4405 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4406 SrcVT = VT; 4407 SrcTy = MVT::getTypeForValueType(SrcVT); 4408 } 4409 // And if it is bigger, shrink it first. 4410 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4411 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1); 4412 SrcVT = VT; 4413 SrcTy = MVT::getTypeForValueType(SrcVT); 4414 } 4415 4416 // At this point the operands and the result should have the same 4417 // type, and that won't be f80 since that is not custom lowered. 4418 4419 // First get the sign bit of second operand. 4420 std::vector<Constant*> CV; 4421 if (SrcVT == MVT::f64) { 4422 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4423 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4424 } else { 4425 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4426 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4427 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4428 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4429 } 4430 Constant *C = ConstantVector::get(CV); 4431 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4432 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0, 4433 false, 16); 4434 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4435 4436 // Shift sign bit right or left if the two operands have different types. 4437 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4438 // Op0 is MVT::f32, Op1 is MVT::f64. 4439 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4440 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4441 DAG.getConstant(32, MVT::i32)); 4442 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4443 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4444 DAG.getConstant(0, getPointerTy())); 4445 } 4446 4447 // Clear first operand sign bit. 4448 CV.clear(); 4449 if (VT == MVT::f64) { 4450 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4451 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4452 } else { 4453 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4454 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4455 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4456 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4457 } 4458 C = ConstantVector::get(CV); 4459 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4460 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4461 false, 16); 4462 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4463 4464 // Or the value with the sign bit. 4465 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4466} 4467 4468SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4469 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4470 SDOperand Cond; 4471 SDOperand Op0 = Op.getOperand(0); 4472 SDOperand Op1 = Op.getOperand(1); 4473 SDOperand CC = Op.getOperand(2); 4474 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4475 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4476 unsigned X86CC; 4477 4478 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4479 Op0, Op1, DAG)) { 4480 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4481 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4482 DAG.getConstant(X86CC, MVT::i8), Cond); 4483 } 4484 4485 assert(isFP && "Illegal integer SetCC!"); 4486 4487 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4488 switch (SetCCOpcode) { 4489 default: assert(false && "Illegal floating point SetCC!"); 4490 case ISD::SETOEQ: { // !PF & ZF 4491 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4492 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4493 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4494 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4495 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4496 } 4497 case ISD::SETUNE: { // PF | !ZF 4498 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4499 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4500 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4501 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4502 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4503 } 4504 } 4505} 4506 4507 4508SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4509 bool addTest = true; 4510 SDOperand Cond = Op.getOperand(0); 4511 SDOperand CC; 4512 4513 if (Cond.getOpcode() == ISD::SETCC) 4514 Cond = LowerSETCC(Cond, DAG); 4515 4516 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4517 // setting operand in place of the X86ISD::SETCC. 4518 if (Cond.getOpcode() == X86ISD::SETCC) { 4519 CC = Cond.getOperand(0); 4520 4521 SDOperand Cmp = Cond.getOperand(1); 4522 unsigned Opc = Cmp.getOpcode(); 4523 MVT::ValueType VT = Op.getValueType(); 4524 bool IllegalFPCMov = false; 4525 if (VT == MVT::f32 && !X86ScalarSSEf32) 4526 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4527 else if (VT == MVT::f64 && !X86ScalarSSEf64) 4528 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4529 else if (VT == MVT::f80) 4530 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4531 if ((Opc == X86ISD::CMP || 4532 Opc == X86ISD::COMI || 4533 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4534 Cond = Cmp; 4535 addTest = false; 4536 } 4537 } 4538 4539 if (addTest) { 4540 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4541 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4542 } 4543 4544 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4545 MVT::Flag); 4546 SmallVector<SDOperand, 4> Ops; 4547 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4548 // condition is true. 4549 Ops.push_back(Op.getOperand(2)); 4550 Ops.push_back(Op.getOperand(1)); 4551 Ops.push_back(CC); 4552 Ops.push_back(Cond); 4553 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4554} 4555 4556SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4557 bool addTest = true; 4558 SDOperand Chain = Op.getOperand(0); 4559 SDOperand Cond = Op.getOperand(1); 4560 SDOperand Dest = Op.getOperand(2); 4561 SDOperand CC; 4562 4563 if (Cond.getOpcode() == ISD::SETCC) 4564 Cond = LowerSETCC(Cond, DAG); 4565 4566 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4567 // setting operand in place of the X86ISD::SETCC. 4568 if (Cond.getOpcode() == X86ISD::SETCC) { 4569 CC = Cond.getOperand(0); 4570 4571 SDOperand Cmp = Cond.getOperand(1); 4572 unsigned Opc = Cmp.getOpcode(); 4573 if (Opc == X86ISD::CMP || 4574 Opc == X86ISD::COMI || 4575 Opc == X86ISD::UCOMI) { 4576 Cond = Cmp; 4577 addTest = false; 4578 } 4579 } 4580 4581 if (addTest) { 4582 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4583 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4584 } 4585 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4586 Chain, Op.getOperand(2), CC, Cond); 4587} 4588 4589SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 4590 unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4591 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 4592 4593 if (Subtarget->is64Bit()) 4594 if(CallingConv==CallingConv::Fast && isTailCall && PerformTailCallOpt) 4595 return LowerX86_TailCallTo(Op, DAG, CallingConv); 4596 else 4597 return LowerX86_64CCCCallTo(Op, DAG, CallingConv); 4598 else 4599 switch (CallingConv) { 4600 default: 4601 assert(0 && "Unsupported calling convention"); 4602 case CallingConv::Fast: 4603 if (isTailCall && PerformTailCallOpt) 4604 return LowerX86_TailCallTo(Op, DAG, CallingConv); 4605 else 4606 return LowerCCCCallTo(Op,DAG, CallingConv); 4607 case CallingConv::C: 4608 case CallingConv::X86_StdCall: 4609 return LowerCCCCallTo(Op, DAG, CallingConv); 4610 case CallingConv::X86_FastCall: 4611 return LowerFastCCCallTo(Op, DAG, CallingConv); 4612 } 4613} 4614 4615 4616// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 4617// Calls to _alloca is needed to probe the stack when allocating more than 4k 4618// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4619// that the guard pages used by the OS virtual memory manager are allocated in 4620// correct sequence. 4621SDOperand 4622X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4623 SelectionDAG &DAG) { 4624 assert(Subtarget->isTargetCygMing() && 4625 "This should be used only on Cygwin/Mingw targets"); 4626 4627 // Get the inputs. 4628 SDOperand Chain = Op.getOperand(0); 4629 SDOperand Size = Op.getOperand(1); 4630 // FIXME: Ensure alignment here 4631 4632 SDOperand Flag; 4633 4634 MVT::ValueType IntPtr = getPointerTy(); 4635 MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 4636 4637 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4638 Flag = Chain.getValue(1); 4639 4640 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4641 SDOperand Ops[] = { Chain, 4642 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4643 DAG.getRegister(X86::EAX, IntPtr), 4644 Flag }; 4645 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4646 Flag = Chain.getValue(1); 4647 4648 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4649 4650 std::vector<MVT::ValueType> Tys; 4651 Tys.push_back(SPTy); 4652 Tys.push_back(MVT::Other); 4653 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4654 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4655} 4656 4657SDOperand 4658X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 4659 MachineFunction &MF = DAG.getMachineFunction(); 4660 const Function* Fn = MF.getFunction(); 4661 if (Fn->hasExternalLinkage() && 4662 Subtarget->isTargetCygMing() && 4663 Fn->getName() == "main") 4664 MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true); 4665 4666 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4667 if (Subtarget->is64Bit()) 4668 return LowerX86_64CCCArguments(Op, DAG); 4669 else 4670 switch(CC) { 4671 default: 4672 assert(0 && "Unsupported calling convention"); 4673 case CallingConv::Fast: 4674 return LowerCCCArguments(Op,DAG, true); 4675 // Falls through 4676 case CallingConv::C: 4677 return LowerCCCArguments(Op, DAG); 4678 case CallingConv::X86_StdCall: 4679 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall); 4680 return LowerCCCArguments(Op, DAG, true); 4681 case CallingConv::X86_FastCall: 4682 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall); 4683 return LowerFastCCArguments(Op, DAG); 4684 } 4685} 4686 4687SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4688 SDOperand InFlag(0, 0); 4689 SDOperand Chain = Op.getOperand(0); 4690 unsigned Align = 4691 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4692 if (Align == 0) Align = 1; 4693 4694 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4695 // If not DWORD aligned or size is more than the threshold, call memset. 4696 // The libc version is likely to be faster for these cases. It can use the 4697 // address value and run time information about the CPU. 4698 if ((Align & 3) != 0 || 4699 (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) { 4700 MVT::ValueType IntPtr = getPointerTy(); 4701 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4702 TargetLowering::ArgListTy Args; 4703 TargetLowering::ArgListEntry Entry; 4704 Entry.Node = Op.getOperand(1); 4705 Entry.Ty = IntPtrTy; 4706 Args.push_back(Entry); 4707 // Extend the unsigned i8 argument to be an int value for the call. 4708 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4709 Entry.Ty = IntPtrTy; 4710 Args.push_back(Entry); 4711 Entry.Node = Op.getOperand(3); 4712 Args.push_back(Entry); 4713 std::pair<SDOperand,SDOperand> CallResult = 4714 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 4715 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4716 return CallResult.second; 4717 } 4718 4719 MVT::ValueType AVT; 4720 SDOperand Count; 4721 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4722 unsigned BytesLeft = 0; 4723 bool TwoRepStos = false; 4724 if (ValC) { 4725 unsigned ValReg; 4726 uint64_t Val = ValC->getValue() & 255; 4727 4728 // If the value is a constant, then we can potentially use larger sets. 4729 switch (Align & 3) { 4730 case 2: // WORD aligned 4731 AVT = MVT::i16; 4732 ValReg = X86::AX; 4733 Val = (Val << 8) | Val; 4734 break; 4735 case 0: // DWORD aligned 4736 AVT = MVT::i32; 4737 ValReg = X86::EAX; 4738 Val = (Val << 8) | Val; 4739 Val = (Val << 16) | Val; 4740 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 4741 AVT = MVT::i64; 4742 ValReg = X86::RAX; 4743 Val = (Val << 32) | Val; 4744 } 4745 break; 4746 default: // Byte aligned 4747 AVT = MVT::i8; 4748 ValReg = X86::AL; 4749 Count = Op.getOperand(3); 4750 break; 4751 } 4752 4753 if (AVT > MVT::i8) { 4754 if (I) { 4755 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4756 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 4757 BytesLeft = I->getValue() % UBytes; 4758 } else { 4759 assert(AVT >= MVT::i32 && 4760 "Do not use rep;stos if not at least DWORD aligned"); 4761 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4762 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4763 TwoRepStos = true; 4764 } 4765 } 4766 4767 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4768 InFlag); 4769 InFlag = Chain.getValue(1); 4770 } else { 4771 AVT = MVT::i8; 4772 Count = Op.getOperand(3); 4773 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4774 InFlag = Chain.getValue(1); 4775 } 4776 4777 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4778 Count, InFlag); 4779 InFlag = Chain.getValue(1); 4780 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4781 Op.getOperand(1), InFlag); 4782 InFlag = Chain.getValue(1); 4783 4784 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4785 SmallVector<SDOperand, 8> Ops; 4786 Ops.push_back(Chain); 4787 Ops.push_back(DAG.getValueType(AVT)); 4788 Ops.push_back(InFlag); 4789 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4790 4791 if (TwoRepStos) { 4792 InFlag = Chain.getValue(1); 4793 Count = Op.getOperand(3); 4794 MVT::ValueType CVT = Count.getValueType(); 4795 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4796 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4797 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4798 Left, InFlag); 4799 InFlag = Chain.getValue(1); 4800 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4801 Ops.clear(); 4802 Ops.push_back(Chain); 4803 Ops.push_back(DAG.getValueType(MVT::i8)); 4804 Ops.push_back(InFlag); 4805 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4806 } else if (BytesLeft) { 4807 // Issue stores for the last 1 - 7 bytes. 4808 SDOperand Value; 4809 unsigned Val = ValC->getValue() & 255; 4810 unsigned Offset = I->getValue() - BytesLeft; 4811 SDOperand DstAddr = Op.getOperand(1); 4812 MVT::ValueType AddrVT = DstAddr.getValueType(); 4813 if (BytesLeft >= 4) { 4814 Val = (Val << 8) | Val; 4815 Val = (Val << 16) | Val; 4816 Value = DAG.getConstant(Val, MVT::i32); 4817 Chain = DAG.getStore(Chain, Value, 4818 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4819 DAG.getConstant(Offset, AddrVT)), 4820 NULL, 0); 4821 BytesLeft -= 4; 4822 Offset += 4; 4823 } 4824 if (BytesLeft >= 2) { 4825 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4826 Chain = DAG.getStore(Chain, Value, 4827 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4828 DAG.getConstant(Offset, AddrVT)), 4829 NULL, 0); 4830 BytesLeft -= 2; 4831 Offset += 2; 4832 } 4833 if (BytesLeft == 1) { 4834 Value = DAG.getConstant(Val, MVT::i8); 4835 Chain = DAG.getStore(Chain, Value, 4836 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4837 DAG.getConstant(Offset, AddrVT)), 4838 NULL, 0); 4839 } 4840 } 4841 4842 return Chain; 4843} 4844 4845SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4846 SDOperand Dest, 4847 SDOperand Source, 4848 unsigned Size, 4849 unsigned Align, 4850 SelectionDAG &DAG) { 4851 MVT::ValueType AVT; 4852 unsigned BytesLeft = 0; 4853 switch (Align & 3) { 4854 case 2: // WORD aligned 4855 AVT = MVT::i16; 4856 break; 4857 case 0: // DWORD aligned 4858 AVT = MVT::i32; 4859 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4860 AVT = MVT::i64; 4861 break; 4862 default: // Byte aligned 4863 AVT = MVT::i8; 4864 break; 4865 } 4866 4867 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4868 SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy()); 4869 BytesLeft = Size % UBytes; 4870 4871 SDOperand InFlag(0, 0); 4872 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4873 Count, InFlag); 4874 InFlag = Chain.getValue(1); 4875 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4876 Dest, InFlag); 4877 InFlag = Chain.getValue(1); 4878 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4879 Source, InFlag); 4880 InFlag = Chain.getValue(1); 4881 4882 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4883 SmallVector<SDOperand, 8> Ops; 4884 Ops.push_back(Chain); 4885 Ops.push_back(DAG.getValueType(AVT)); 4886 Ops.push_back(InFlag); 4887 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4888 4889 if (BytesLeft) { 4890 // Issue loads and stores for the last 1 - 7 bytes. 4891 unsigned Offset = Size - BytesLeft; 4892 SDOperand DstAddr = Dest; 4893 MVT::ValueType DstVT = DstAddr.getValueType(); 4894 SDOperand SrcAddr = Source; 4895 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4896 SDOperand Value; 4897 if (BytesLeft >= 4) { 4898 Value = DAG.getLoad(MVT::i32, Chain, 4899 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4900 DAG.getConstant(Offset, SrcVT)), 4901 NULL, 0); 4902 Chain = Value.getValue(1); 4903 Chain = DAG.getStore(Chain, Value, 4904 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4905 DAG.getConstant(Offset, DstVT)), 4906 NULL, 0); 4907 BytesLeft -= 4; 4908 Offset += 4; 4909 } 4910 if (BytesLeft >= 2) { 4911 Value = DAG.getLoad(MVT::i16, Chain, 4912 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4913 DAG.getConstant(Offset, SrcVT)), 4914 NULL, 0); 4915 Chain = Value.getValue(1); 4916 Chain = DAG.getStore(Chain, Value, 4917 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4918 DAG.getConstant(Offset, DstVT)), 4919 NULL, 0); 4920 BytesLeft -= 2; 4921 Offset += 2; 4922 } 4923 4924 if (BytesLeft == 1) { 4925 Value = DAG.getLoad(MVT::i8, Chain, 4926 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4927 DAG.getConstant(Offset, SrcVT)), 4928 NULL, 0); 4929 Chain = Value.getValue(1); 4930 Chain = DAG.getStore(Chain, Value, 4931 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4932 DAG.getConstant(Offset, DstVT)), 4933 NULL, 0); 4934 } 4935 } 4936 4937 return Chain; 4938} 4939 4940/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4941SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4942 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4943 SDOperand TheChain = N->getOperand(0); 4944 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4945 if (Subtarget->is64Bit()) { 4946 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4947 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4948 MVT::i64, rax.getValue(2)); 4949 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4950 DAG.getConstant(32, MVT::i8)); 4951 SDOperand Ops[] = { 4952 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4953 }; 4954 4955 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4956 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4957 } 4958 4959 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4960 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4961 MVT::i32, eax.getValue(2)); 4962 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4963 SDOperand Ops[] = { eax, edx }; 4964 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4965 4966 // Use a MERGE_VALUES to return the value and chain. 4967 Ops[1] = edx.getValue(1); 4968 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4969 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4970} 4971 4972SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4973 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 4974 4975 if (!Subtarget->is64Bit()) { 4976 // vastart just stores the address of the VarArgsFrameIndex slot into the 4977 // memory location argument. 4978 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4979 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 4980 SV->getOffset()); 4981 } 4982 4983 // __va_list_tag: 4984 // gp_offset (0 - 6 * 8) 4985 // fp_offset (48 - 48 + 8 * 16) 4986 // overflow_arg_area (point to parameters coming in memory). 4987 // reg_save_area 4988 SmallVector<SDOperand, 8> MemOps; 4989 SDOperand FIN = Op.getOperand(1); 4990 // Store gp_offset 4991 SDOperand Store = DAG.getStore(Op.getOperand(0), 4992 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4993 FIN, SV->getValue(), SV->getOffset()); 4994 MemOps.push_back(Store); 4995 4996 // Store fp_offset 4997 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4998 DAG.getConstant(4, getPointerTy())); 4999 Store = DAG.getStore(Op.getOperand(0), 5000 DAG.getConstant(VarArgsFPOffset, MVT::i32), 5001 FIN, SV->getValue(), SV->getOffset()); 5002 MemOps.push_back(Store); 5003 5004 // Store ptr to overflow_arg_area 5005 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 5006 DAG.getConstant(4, getPointerTy())); 5007 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 5008 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 5009 SV->getOffset()); 5010 MemOps.push_back(Store); 5011 5012 // Store ptr to reg_save_area. 5013 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 5014 DAG.getConstant(8, getPointerTy())); 5015 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 5016 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 5017 SV->getOffset()); 5018 MemOps.push_back(Store); 5019 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 5020} 5021 5022SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 5023 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 5024 SDOperand Chain = Op.getOperand(0); 5025 SDOperand DstPtr = Op.getOperand(1); 5026 SDOperand SrcPtr = Op.getOperand(2); 5027 SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3)); 5028 SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4)); 5029 5030 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, 5031 SrcSV->getValue(), SrcSV->getOffset()); 5032 Chain = SrcPtr.getValue(1); 5033 for (unsigned i = 0; i < 3; ++i) { 5034 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, 5035 SrcSV->getValue(), SrcSV->getOffset()); 5036 Chain = Val.getValue(1); 5037 Chain = DAG.getStore(Chain, Val, DstPtr, 5038 DstSV->getValue(), DstSV->getOffset()); 5039 if (i == 2) 5040 break; 5041 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 5042 DAG.getConstant(8, getPointerTy())); 5043 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 5044 DAG.getConstant(8, getPointerTy())); 5045 } 5046 return Chain; 5047} 5048 5049SDOperand 5050X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 5051 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 5052 switch (IntNo) { 5053 default: return SDOperand(); // Don't custom lower most intrinsics. 5054 // Comparison intrinsics. 5055 case Intrinsic::x86_sse_comieq_ss: 5056 case Intrinsic::x86_sse_comilt_ss: 5057 case Intrinsic::x86_sse_comile_ss: 5058 case Intrinsic::x86_sse_comigt_ss: 5059 case Intrinsic::x86_sse_comige_ss: 5060 case Intrinsic::x86_sse_comineq_ss: 5061 case Intrinsic::x86_sse_ucomieq_ss: 5062 case Intrinsic::x86_sse_ucomilt_ss: 5063 case Intrinsic::x86_sse_ucomile_ss: 5064 case Intrinsic::x86_sse_ucomigt_ss: 5065 case Intrinsic::x86_sse_ucomige_ss: 5066 case Intrinsic::x86_sse_ucomineq_ss: 5067 case Intrinsic::x86_sse2_comieq_sd: 5068 case Intrinsic::x86_sse2_comilt_sd: 5069 case Intrinsic::x86_sse2_comile_sd: 5070 case Intrinsic::x86_sse2_comigt_sd: 5071 case Intrinsic::x86_sse2_comige_sd: 5072 case Intrinsic::x86_sse2_comineq_sd: 5073 case Intrinsic::x86_sse2_ucomieq_sd: 5074 case Intrinsic::x86_sse2_ucomilt_sd: 5075 case Intrinsic::x86_sse2_ucomile_sd: 5076 case Intrinsic::x86_sse2_ucomigt_sd: 5077 case Intrinsic::x86_sse2_ucomige_sd: 5078 case Intrinsic::x86_sse2_ucomineq_sd: { 5079 unsigned Opc = 0; 5080 ISD::CondCode CC = ISD::SETCC_INVALID; 5081 switch (IntNo) { 5082 default: break; 5083 case Intrinsic::x86_sse_comieq_ss: 5084 case Intrinsic::x86_sse2_comieq_sd: 5085 Opc = X86ISD::COMI; 5086 CC = ISD::SETEQ; 5087 break; 5088 case Intrinsic::x86_sse_comilt_ss: 5089 case Intrinsic::x86_sse2_comilt_sd: 5090 Opc = X86ISD::COMI; 5091 CC = ISD::SETLT; 5092 break; 5093 case Intrinsic::x86_sse_comile_ss: 5094 case Intrinsic::x86_sse2_comile_sd: 5095 Opc = X86ISD::COMI; 5096 CC = ISD::SETLE; 5097 break; 5098 case Intrinsic::x86_sse_comigt_ss: 5099 case Intrinsic::x86_sse2_comigt_sd: 5100 Opc = X86ISD::COMI; 5101 CC = ISD::SETGT; 5102 break; 5103 case Intrinsic::x86_sse_comige_ss: 5104 case Intrinsic::x86_sse2_comige_sd: 5105 Opc = X86ISD::COMI; 5106 CC = ISD::SETGE; 5107 break; 5108 case Intrinsic::x86_sse_comineq_ss: 5109 case Intrinsic::x86_sse2_comineq_sd: 5110 Opc = X86ISD::COMI; 5111 CC = ISD::SETNE; 5112 break; 5113 case Intrinsic::x86_sse_ucomieq_ss: 5114 case Intrinsic::x86_sse2_ucomieq_sd: 5115 Opc = X86ISD::UCOMI; 5116 CC = ISD::SETEQ; 5117 break; 5118 case Intrinsic::x86_sse_ucomilt_ss: 5119 case Intrinsic::x86_sse2_ucomilt_sd: 5120 Opc = X86ISD::UCOMI; 5121 CC = ISD::SETLT; 5122 break; 5123 case Intrinsic::x86_sse_ucomile_ss: 5124 case Intrinsic::x86_sse2_ucomile_sd: 5125 Opc = X86ISD::UCOMI; 5126 CC = ISD::SETLE; 5127 break; 5128 case Intrinsic::x86_sse_ucomigt_ss: 5129 case Intrinsic::x86_sse2_ucomigt_sd: 5130 Opc = X86ISD::UCOMI; 5131 CC = ISD::SETGT; 5132 break; 5133 case Intrinsic::x86_sse_ucomige_ss: 5134 case Intrinsic::x86_sse2_ucomige_sd: 5135 Opc = X86ISD::UCOMI; 5136 CC = ISD::SETGE; 5137 break; 5138 case Intrinsic::x86_sse_ucomineq_ss: 5139 case Intrinsic::x86_sse2_ucomineq_sd: 5140 Opc = X86ISD::UCOMI; 5141 CC = ISD::SETNE; 5142 break; 5143 } 5144 5145 unsigned X86CC; 5146 SDOperand LHS = Op.getOperand(1); 5147 SDOperand RHS = Op.getOperand(2); 5148 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 5149 5150 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 5151 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 5152 DAG.getConstant(X86CC, MVT::i8), Cond); 5153 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 5154 } 5155 } 5156} 5157 5158SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 5159 // Depths > 0 not supported yet! 5160 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5161 return SDOperand(); 5162 5163 // Just load the return address 5164 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5165 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 5166} 5167 5168SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 5169 // Depths > 0 not supported yet! 5170 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 5171 return SDOperand(); 5172 5173 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 5174 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 5175 DAG.getConstant(4, getPointerTy())); 5176} 5177 5178SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 5179 SelectionDAG &DAG) { 5180 // Is not yet supported on x86-64 5181 if (Subtarget->is64Bit()) 5182 return SDOperand(); 5183 5184 return DAG.getConstant(8, getPointerTy()); 5185} 5186 5187SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 5188{ 5189 assert(!Subtarget->is64Bit() && 5190 "Lowering of eh_return builtin is not supported yet on x86-64"); 5191 5192 MachineFunction &MF = DAG.getMachineFunction(); 5193 SDOperand Chain = Op.getOperand(0); 5194 SDOperand Offset = Op.getOperand(1); 5195 SDOperand Handler = Op.getOperand(2); 5196 5197 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5198 getPointerTy()); 5199 5200 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5201 DAG.getConstant(-4UL, getPointerTy())); 5202 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5203 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5204 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5205 MF.addLiveOut(X86::ECX); 5206 5207 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5208 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5209} 5210 5211SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5212 SelectionDAG &DAG) { 5213 SDOperand Root = Op.getOperand(0); 5214 SDOperand Trmp = Op.getOperand(1); // trampoline 5215 SDOperand FPtr = Op.getOperand(2); // nested function 5216 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5217 5218 SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4)); 5219 5220 if (Subtarget->is64Bit()) { 5221 return SDOperand(); // not yet supported 5222 } else { 5223 Function *Func = (Function *) 5224 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5225 unsigned CC = Func->getCallingConv(); 5226 unsigned NestReg; 5227 5228 switch (CC) { 5229 default: 5230 assert(0 && "Unsupported calling convention"); 5231 case CallingConv::C: 5232 case CallingConv::X86_StdCall: { 5233 // Pass 'nest' parameter in ECX. 5234 // Must be kept in sync with X86CallingConv.td 5235 NestReg = X86::ECX; 5236 5237 // Check that ECX wasn't needed by an 'inreg' parameter. 5238 const FunctionType *FTy = Func->getFunctionType(); 5239 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5240 5241 if (Attrs && !Func->isVarArg()) { 5242 unsigned InRegCount = 0; 5243 unsigned Idx = 1; 5244 5245 for (FunctionType::param_iterator I = FTy->param_begin(), 5246 E = FTy->param_end(); I != E; ++I, ++Idx) 5247 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5248 // FIXME: should only count parameters that are lowered to integers. 5249 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5250 5251 if (InRegCount > 2) { 5252 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5253 abort(); 5254 } 5255 } 5256 break; 5257 } 5258 case CallingConv::X86_FastCall: 5259 // Pass 'nest' parameter in EAX. 5260 // Must be kept in sync with X86CallingConv.td 5261 NestReg = X86::EAX; 5262 break; 5263 } 5264 5265 const X86InstrInfo *TII = 5266 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5267 5268 SDOperand OutChains[4]; 5269 SDOperand Addr, Disp; 5270 5271 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); 5272 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); 5273 5274 unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri); 5275 unsigned char N86Reg = ((X86RegisterInfo&)RegInfo).getX86RegNum(NestReg); 5276 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 5277 Trmp, TrmpSV->getValue(), TrmpSV->getOffset()); 5278 5279 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32)); 5280 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(), 5281 TrmpSV->getOffset() + 1, false, 1); 5282 5283 unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP); 5284 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32)); 5285 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr, 5286 TrmpSV->getValue() + 5, TrmpSV->getOffset()); 5287 5288 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); 5289 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(), 5290 TrmpSV->getOffset() + 6, false, 1); 5291 5292 SDOperand Ops[] = 5293 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; 5294 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5295 } 5296} 5297 5298SDOperand X86TargetLowering::LowerFLT_ROUNDS(SDOperand Op, SelectionDAG &DAG) { 5299 /* 5300 The rounding mode is in bits 11:10 of FPSR, and has the following 5301 settings: 5302 00 Round to nearest 5303 01 Round to -inf 5304 10 Round to +inf 5305 11 Round to 0 5306 5307 FLT_ROUNDS, on the other hand, expects the following: 5308 -1 Undefined 5309 0 Round to 0 5310 1 Round to nearest 5311 2 Round to +inf 5312 3 Round to -inf 5313 5314 To perform the conversion, we do: 5315 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 5316 */ 5317 5318 MachineFunction &MF = DAG.getMachineFunction(); 5319 const TargetMachine &TM = MF.getTarget(); 5320 const TargetFrameInfo &TFI = *TM.getFrameInfo(); 5321 unsigned StackAlignment = TFI.getStackAlignment(); 5322 MVT::ValueType VT = Op.getValueType(); 5323 5324 // Save FP Control Word to stack slot 5325 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); 5326 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 5327 5328 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, 5329 DAG.getEntryNode(), StackSlot); 5330 5331 // Load FP Control Word from stack slot 5332 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); 5333 5334 // Transform as necessary 5335 SDOperand CWD1 = 5336 DAG.getNode(ISD::SRL, MVT::i16, 5337 DAG.getNode(ISD::AND, MVT::i16, 5338 CWD, DAG.getConstant(0x800, MVT::i16)), 5339 DAG.getConstant(11, MVT::i8)); 5340 SDOperand CWD2 = 5341 DAG.getNode(ISD::SRL, MVT::i16, 5342 DAG.getNode(ISD::AND, MVT::i16, 5343 CWD, DAG.getConstant(0x400, MVT::i16)), 5344 DAG.getConstant(9, MVT::i8)); 5345 5346 SDOperand RetVal = 5347 DAG.getNode(ISD::AND, MVT::i16, 5348 DAG.getNode(ISD::ADD, MVT::i16, 5349 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), 5350 DAG.getConstant(1, MVT::i16)), 5351 DAG.getConstant(3, MVT::i16)); 5352 5353 5354 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 5355 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 5356} 5357 5358/// LowerOperation - Provide custom lowering hooks for some operations. 5359/// 5360SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 5361 switch (Op.getOpcode()) { 5362 default: assert(0 && "Should not custom lower this!"); 5363 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5364 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5365 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5366 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5367 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5368 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5369 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5370 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5371 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 5372 case ISD::SHL_PARTS: 5373 case ISD::SRA_PARTS: 5374 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 5375 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5376 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 5377 case ISD::FABS: return LowerFABS(Op, DAG); 5378 case ISD::FNEG: return LowerFNEG(Op, DAG); 5379 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5380 case ISD::SETCC: return LowerSETCC(Op, DAG); 5381 case ISD::SELECT: return LowerSELECT(Op, DAG); 5382 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 5383 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5384 case ISD::CALL: return LowerCALL(Op, DAG); 5385 case ISD::RET: return LowerRET(Op, DAG); 5386 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 5387 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 5388 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 5389 case ISD::VASTART: return LowerVASTART(Op, DAG); 5390 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 5391 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5392 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5393 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5394 case ISD::FRAME_TO_ARGS_OFFSET: 5395 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 5396 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 5397 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 5398 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 5399 case ISD::FLT_ROUNDS: return LowerFLT_ROUNDS(Op, DAG); 5400 5401 5402 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. 5403 case ISD::READCYCLECOUNTER: 5404 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); 5405 } 5406} 5407 5408/// ExpandOperation - Provide custom lowering hooks for expanding operations. 5409SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 5410 switch (N->getOpcode()) { 5411 default: assert(0 && "Should not custom lower this!"); 5412 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); 5413 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); 5414 } 5415} 5416 5417const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 5418 switch (Opcode) { 5419 default: return NULL; 5420 case X86ISD::SHLD: return "X86ISD::SHLD"; 5421 case X86ISD::SHRD: return "X86ISD::SHRD"; 5422 case X86ISD::FAND: return "X86ISD::FAND"; 5423 case X86ISD::FOR: return "X86ISD::FOR"; 5424 case X86ISD::FXOR: return "X86ISD::FXOR"; 5425 case X86ISD::FSRL: return "X86ISD::FSRL"; 5426 case X86ISD::FILD: return "X86ISD::FILD"; 5427 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5428 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5429 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5430 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5431 case X86ISD::FLD: return "X86ISD::FLD"; 5432 case X86ISD::FST: return "X86ISD::FST"; 5433 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 5434 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 5435 case X86ISD::CALL: return "X86ISD::CALL"; 5436 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5437 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5438 case X86ISD::CMP: return "X86ISD::CMP"; 5439 case X86ISD::COMI: return "X86ISD::COMI"; 5440 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5441 case X86ISD::SETCC: return "X86ISD::SETCC"; 5442 case X86ISD::CMOV: return "X86ISD::CMOV"; 5443 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5444 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5445 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5446 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5447 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5448 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5449 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 5450 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5451 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5452 case X86ISD::FMAX: return "X86ISD::FMAX"; 5453 case X86ISD::FMIN: return "X86ISD::FMIN"; 5454 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5455 case X86ISD::FRCP: return "X86ISD::FRCP"; 5456 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5457 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5458 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5459 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5460 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 5461 } 5462} 5463 5464// isLegalAddressingMode - Return true if the addressing mode represented 5465// by AM is legal for this target, for a load/store of the specified type. 5466bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5467 const Type *Ty) const { 5468 // X86 supports extremely general addressing modes. 5469 5470 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5471 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5472 return false; 5473 5474 if (AM.BaseGV) { 5475 // We can only fold this if we don't need an extra load. 5476 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5477 return false; 5478 5479 // X86-64 only supports addr of globals in small code model. 5480 if (Subtarget->is64Bit()) { 5481 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5482 return false; 5483 // If lower 4G is not available, then we must use rip-relative addressing. 5484 if (AM.BaseOffs || AM.Scale > 1) 5485 return false; 5486 } 5487 } 5488 5489 switch (AM.Scale) { 5490 case 0: 5491 case 1: 5492 case 2: 5493 case 4: 5494 case 8: 5495 // These scales always work. 5496 break; 5497 case 3: 5498 case 5: 5499 case 9: 5500 // These scales are formed with basereg+scalereg. Only accept if there is 5501 // no basereg yet. 5502 if (AM.HasBaseReg) 5503 return false; 5504 break; 5505 default: // Other stuff never works. 5506 return false; 5507 } 5508 5509 return true; 5510} 5511 5512 5513bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5514 if (!Ty1->isInteger() || !Ty2->isInteger()) 5515 return false; 5516 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5517 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5518 if (NumBits1 <= NumBits2) 5519 return false; 5520 return Subtarget->is64Bit() || NumBits1 < 64; 5521} 5522 5523bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5524 MVT::ValueType VT2) const { 5525 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5526 return false; 5527 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5528 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5529 if (NumBits1 <= NumBits2) 5530 return false; 5531 return Subtarget->is64Bit() || NumBits1 < 64; 5532} 5533 5534/// isShuffleMaskLegal - Targets can use this to indicate that they only 5535/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5536/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5537/// are assumed to be legal. 5538bool 5539X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5540 // Only do shuffles on 128-bit vector types for now. 5541 if (MVT::getSizeInBits(VT) == 64) return false; 5542 return (Mask.Val->getNumOperands() <= 4 || 5543 isIdentityMask(Mask.Val) || 5544 isIdentityMask(Mask.Val, true) || 5545 isSplatMask(Mask.Val) || 5546 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5547 X86::isUNPCKLMask(Mask.Val) || 5548 X86::isUNPCKHMask(Mask.Val) || 5549 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5550 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5551} 5552 5553bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5554 MVT::ValueType EVT, 5555 SelectionDAG &DAG) const { 5556 unsigned NumElts = BVOps.size(); 5557 // Only do shuffles on 128-bit vector types for now. 5558 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5559 if (NumElts == 2) return true; 5560 if (NumElts == 4) { 5561 return (isMOVLMask(&BVOps[0], 4) || 5562 isCommutedMOVL(&BVOps[0], 4, true) || 5563 isSHUFPMask(&BVOps[0], 4) || 5564 isCommutedSHUFP(&BVOps[0], 4)); 5565 } 5566 return false; 5567} 5568 5569//===----------------------------------------------------------------------===// 5570// X86 Scheduler Hooks 5571//===----------------------------------------------------------------------===// 5572 5573MachineBasicBlock * 5574X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 5575 MachineBasicBlock *BB) { 5576 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5577 switch (MI->getOpcode()) { 5578 default: assert(false && "Unexpected instr type to insert"); 5579 case X86::CMOV_FR32: 5580 case X86::CMOV_FR64: 5581 case X86::CMOV_V4F32: 5582 case X86::CMOV_V2F64: 5583 case X86::CMOV_V2I64: { 5584 // To "insert" a SELECT_CC instruction, we actually have to insert the 5585 // diamond control-flow pattern. The incoming instruction knows the 5586 // destination vreg to set, the condition code register to branch on, the 5587 // true/false values to select between, and a branch opcode to use. 5588 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5589 ilist<MachineBasicBlock>::iterator It = BB; 5590 ++It; 5591 5592 // thisMBB: 5593 // ... 5594 // TrueVal = ... 5595 // cmpTY ccX, r1, r2 5596 // bCC copy1MBB 5597 // fallthrough --> copy0MBB 5598 MachineBasicBlock *thisMBB = BB; 5599 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5600 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5601 unsigned Opc = 5602 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5603 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5604 MachineFunction *F = BB->getParent(); 5605 F->getBasicBlockList().insert(It, copy0MBB); 5606 F->getBasicBlockList().insert(It, sinkMBB); 5607 // Update machine-CFG edges by first adding all successors of the current 5608 // block to the new block which will contain the Phi node for the select. 5609 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5610 e = BB->succ_end(); i != e; ++i) 5611 sinkMBB->addSuccessor(*i); 5612 // Next, remove all successors of the current block, and add the true 5613 // and fallthrough blocks as its successors. 5614 while(!BB->succ_empty()) 5615 BB->removeSuccessor(BB->succ_begin()); 5616 BB->addSuccessor(copy0MBB); 5617 BB->addSuccessor(sinkMBB); 5618 5619 // copy0MBB: 5620 // %FalseValue = ... 5621 // # fallthrough to sinkMBB 5622 BB = copy0MBB; 5623 5624 // Update machine-CFG edges 5625 BB->addSuccessor(sinkMBB); 5626 5627 // sinkMBB: 5628 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5629 // ... 5630 BB = sinkMBB; 5631 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5632 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5633 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5634 5635 delete MI; // The pseudo instruction is gone now. 5636 return BB; 5637 } 5638 5639 case X86::FP32_TO_INT16_IN_MEM: 5640 case X86::FP32_TO_INT32_IN_MEM: 5641 case X86::FP32_TO_INT64_IN_MEM: 5642 case X86::FP64_TO_INT16_IN_MEM: 5643 case X86::FP64_TO_INT32_IN_MEM: 5644 case X86::FP64_TO_INT64_IN_MEM: 5645 case X86::FP80_TO_INT16_IN_MEM: 5646 case X86::FP80_TO_INT32_IN_MEM: 5647 case X86::FP80_TO_INT64_IN_MEM: { 5648 // Change the floating point control register to use "round towards zero" 5649 // mode when truncating to an integer value. 5650 MachineFunction *F = BB->getParent(); 5651 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5652 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5653 5654 // Load the old value of the high byte of the control word... 5655 unsigned OldCW = 5656 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass); 5657 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5658 5659 // Set the high part to be round to zero... 5660 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5661 .addImm(0xC7F); 5662 5663 // Reload the modified control word now... 5664 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5665 5666 // Restore the memory image of control word to original value 5667 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5668 .addReg(OldCW); 5669 5670 // Get the X86 opcode to use. 5671 unsigned Opc; 5672 switch (MI->getOpcode()) { 5673 default: assert(0 && "illegal opcode!"); 5674 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5675 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5676 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5677 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5678 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5679 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5680 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5681 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5682 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5683 } 5684 5685 X86AddressMode AM; 5686 MachineOperand &Op = MI->getOperand(0); 5687 if (Op.isRegister()) { 5688 AM.BaseType = X86AddressMode::RegBase; 5689 AM.Base.Reg = Op.getReg(); 5690 } else { 5691 AM.BaseType = X86AddressMode::FrameIndexBase; 5692 AM.Base.FrameIndex = Op.getFrameIndex(); 5693 } 5694 Op = MI->getOperand(1); 5695 if (Op.isImmediate()) 5696 AM.Scale = Op.getImm(); 5697 Op = MI->getOperand(2); 5698 if (Op.isImmediate()) 5699 AM.IndexReg = Op.getImm(); 5700 Op = MI->getOperand(3); 5701 if (Op.isGlobalAddress()) { 5702 AM.GV = Op.getGlobal(); 5703 } else { 5704 AM.Disp = Op.getImm(); 5705 } 5706 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5707 .addReg(MI->getOperand(4).getReg()); 5708 5709 // Reload the original control word now. 5710 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5711 5712 delete MI; // The pseudo instruction is gone now. 5713 return BB; 5714 } 5715 } 5716} 5717 5718//===----------------------------------------------------------------------===// 5719// X86 Optimization Hooks 5720//===----------------------------------------------------------------------===// 5721 5722void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5723 uint64_t Mask, 5724 uint64_t &KnownZero, 5725 uint64_t &KnownOne, 5726 const SelectionDAG &DAG, 5727 unsigned Depth) const { 5728 unsigned Opc = Op.getOpcode(); 5729 assert((Opc >= ISD::BUILTIN_OP_END || 5730 Opc == ISD::INTRINSIC_WO_CHAIN || 5731 Opc == ISD::INTRINSIC_W_CHAIN || 5732 Opc == ISD::INTRINSIC_VOID) && 5733 "Should use MaskedValueIsZero if you don't know whether Op" 5734 " is a target node!"); 5735 5736 KnownZero = KnownOne = 0; // Don't know anything. 5737 switch (Opc) { 5738 default: break; 5739 case X86ISD::SETCC: 5740 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5741 break; 5742 } 5743} 5744 5745/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5746/// element of the result of the vector shuffle. 5747static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5748 MVT::ValueType VT = N->getValueType(0); 5749 SDOperand PermMask = N->getOperand(2); 5750 unsigned NumElems = PermMask.getNumOperands(); 5751 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5752 i %= NumElems; 5753 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5754 return (i == 0) 5755 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5756 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5757 SDOperand Idx = PermMask.getOperand(i); 5758 if (Idx.getOpcode() == ISD::UNDEF) 5759 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5760 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5761 } 5762 return SDOperand(); 5763} 5764 5765/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5766/// node is a GlobalAddress + an offset. 5767static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5768 unsigned Opc = N->getOpcode(); 5769 if (Opc == X86ISD::Wrapper) { 5770 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5771 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5772 return true; 5773 } 5774 } else if (Opc == ISD::ADD) { 5775 SDOperand N1 = N->getOperand(0); 5776 SDOperand N2 = N->getOperand(1); 5777 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5778 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5779 if (V) { 5780 Offset += V->getSignExtended(); 5781 return true; 5782 } 5783 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5784 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5785 if (V) { 5786 Offset += V->getSignExtended(); 5787 return true; 5788 } 5789 } 5790 } 5791 return false; 5792} 5793 5794/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5795/// + Dist * Size. 5796static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5797 MachineFrameInfo *MFI) { 5798 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5799 return false; 5800 5801 SDOperand Loc = N->getOperand(1); 5802 SDOperand BaseLoc = Base->getOperand(1); 5803 if (Loc.getOpcode() == ISD::FrameIndex) { 5804 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5805 return false; 5806 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5807 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5808 int FS = MFI->getObjectSize(FI); 5809 int BFS = MFI->getObjectSize(BFI); 5810 if (FS != BFS || FS != Size) return false; 5811 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5812 } else { 5813 GlobalValue *GV1 = NULL; 5814 GlobalValue *GV2 = NULL; 5815 int64_t Offset1 = 0; 5816 int64_t Offset2 = 0; 5817 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5818 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5819 if (isGA1 && isGA2 && GV1 == GV2) 5820 return Offset1 == (Offset2 + Dist*Size); 5821 } 5822 5823 return false; 5824} 5825 5826static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5827 const X86Subtarget *Subtarget) { 5828 GlobalValue *GV; 5829 int64_t Offset; 5830 if (isGAPlusOffset(Base, GV, Offset)) 5831 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5832 else { 5833 assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!"); 5834 int BFI = cast<FrameIndexSDNode>(Base)->getIndex(); 5835 if (BFI < 0) 5836 // Fixed objects do not specify alignment, however the offsets are known. 5837 return ((Subtarget->getStackAlignment() % 16) == 0 && 5838 (MFI->getObjectOffset(BFI) % 16) == 0); 5839 else 5840 return MFI->getObjectAlignment(BFI) >= 16; 5841 } 5842 return false; 5843} 5844 5845 5846/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5847/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5848/// if the load addresses are consecutive, non-overlapping, and in the right 5849/// order. 5850static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5851 const X86Subtarget *Subtarget) { 5852 MachineFunction &MF = DAG.getMachineFunction(); 5853 MachineFrameInfo *MFI = MF.getFrameInfo(); 5854 MVT::ValueType VT = N->getValueType(0); 5855 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5856 SDOperand PermMask = N->getOperand(2); 5857 int NumElems = (int)PermMask.getNumOperands(); 5858 SDNode *Base = NULL; 5859 for (int i = 0; i < NumElems; ++i) { 5860 SDOperand Idx = PermMask.getOperand(i); 5861 if (Idx.getOpcode() == ISD::UNDEF) { 5862 if (!Base) return SDOperand(); 5863 } else { 5864 SDOperand Arg = 5865 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5866 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5867 return SDOperand(); 5868 if (!Base) 5869 Base = Arg.Val; 5870 else if (!isConsecutiveLoad(Arg.Val, Base, 5871 i, MVT::getSizeInBits(EVT)/8,MFI)) 5872 return SDOperand(); 5873 } 5874 } 5875 5876 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5877 LoadSDNode *LD = cast<LoadSDNode>(Base); 5878 if (isAlign16) { 5879 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5880 LD->getSrcValueOffset(), LD->isVolatile()); 5881 } else { 5882 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5883 LD->getSrcValueOffset(), LD->isVolatile(), 5884 LD->getAlignment()); 5885 } 5886} 5887 5888/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5889static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5890 const X86Subtarget *Subtarget) { 5891 SDOperand Cond = N->getOperand(0); 5892 5893 // If we have SSE[12] support, try to form min/max nodes. 5894 if (Subtarget->hasSSE2() && 5895 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5896 if (Cond.getOpcode() == ISD::SETCC) { 5897 // Get the LHS/RHS of the select. 5898 SDOperand LHS = N->getOperand(1); 5899 SDOperand RHS = N->getOperand(2); 5900 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5901 5902 unsigned Opcode = 0; 5903 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5904 switch (CC) { 5905 default: break; 5906 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5907 case ISD::SETULE: 5908 case ISD::SETLE: 5909 if (!UnsafeFPMath) break; 5910 // FALL THROUGH. 5911 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5912 case ISD::SETLT: 5913 Opcode = X86ISD::FMIN; 5914 break; 5915 5916 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5917 case ISD::SETUGT: 5918 case ISD::SETGT: 5919 if (!UnsafeFPMath) break; 5920 // FALL THROUGH. 5921 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5922 case ISD::SETGE: 5923 Opcode = X86ISD::FMAX; 5924 break; 5925 } 5926 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5927 switch (CC) { 5928 default: break; 5929 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5930 case ISD::SETUGT: 5931 case ISD::SETGT: 5932 if (!UnsafeFPMath) break; 5933 // FALL THROUGH. 5934 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5935 case ISD::SETGE: 5936 Opcode = X86ISD::FMIN; 5937 break; 5938 5939 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5940 case ISD::SETULE: 5941 case ISD::SETLE: 5942 if (!UnsafeFPMath) break; 5943 // FALL THROUGH. 5944 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 5945 case ISD::SETLT: 5946 Opcode = X86ISD::FMAX; 5947 break; 5948 } 5949 } 5950 5951 if (Opcode) 5952 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5953 } 5954 5955 } 5956 5957 return SDOperand(); 5958} 5959 5960 5961SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5962 DAGCombinerInfo &DCI) const { 5963 SelectionDAG &DAG = DCI.DAG; 5964 switch (N->getOpcode()) { 5965 default: break; 5966 case ISD::VECTOR_SHUFFLE: 5967 return PerformShuffleCombine(N, DAG, Subtarget); 5968 case ISD::SELECT: 5969 return PerformSELECTCombine(N, DAG, Subtarget); 5970 } 5971 5972 return SDOperand(); 5973} 5974 5975//===----------------------------------------------------------------------===// 5976// X86 Inline Assembly Support 5977//===----------------------------------------------------------------------===// 5978 5979/// getConstraintType - Given a constraint letter, return the type of 5980/// constraint it is for this target. 5981X86TargetLowering::ConstraintType 5982X86TargetLowering::getConstraintType(const std::string &Constraint) const { 5983 if (Constraint.size() == 1) { 5984 switch (Constraint[0]) { 5985 case 'A': 5986 case 'r': 5987 case 'R': 5988 case 'l': 5989 case 'q': 5990 case 'Q': 5991 case 'x': 5992 case 'Y': 5993 return C_RegisterClass; 5994 default: 5995 break; 5996 } 5997 } 5998 return TargetLowering::getConstraintType(Constraint); 5999} 6000 6001/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 6002/// vector. If it is invalid, don't add anything to Ops. 6003void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, 6004 char Constraint, 6005 std::vector<SDOperand>&Ops, 6006 SelectionDAG &DAG) { 6007 SDOperand Result(0, 0); 6008 6009 switch (Constraint) { 6010 default: break; 6011 case 'I': 6012 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 6013 if (C->getValue() <= 31) { 6014 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType()); 6015 break; 6016 } 6017 } 6018 return; 6019 case 'N': 6020 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 6021 if (C->getValue() <= 255) { 6022 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType()); 6023 break; 6024 } 6025 } 6026 return; 6027 case 'i': { 6028 // Literal immediates are always ok. 6029 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 6030 Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType()); 6031 break; 6032 } 6033 6034 // If we are in non-pic codegen mode, we allow the address of a global (with 6035 // an optional displacement) to be used with 'i'. 6036 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); 6037 int64_t Offset = 0; 6038 6039 // Match either (GA) or (GA+C) 6040 if (GA) { 6041 Offset = GA->getOffset(); 6042 } else if (Op.getOpcode() == ISD::ADD) { 6043 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 6044 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 6045 if (C && GA) { 6046 Offset = GA->getOffset()+C->getValue(); 6047 } else { 6048 C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 6049 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 6050 if (C && GA) 6051 Offset = GA->getOffset()+C->getValue(); 6052 else 6053 C = 0, GA = 0; 6054 } 6055 } 6056 6057 if (GA) { 6058 // If addressing this global requires a load (e.g. in PIC mode), we can't 6059 // match. 6060 if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(), 6061 false)) 6062 return; 6063 6064 Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), 6065 Offset); 6066 Result = Op; 6067 break; 6068 } 6069 6070 // Otherwise, not valid for this mode. 6071 return; 6072 } 6073 } 6074 6075 if (Result.Val) { 6076 Ops.push_back(Result); 6077 return; 6078 } 6079 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 6080} 6081 6082std::vector<unsigned> X86TargetLowering:: 6083getRegClassForInlineAsmConstraint(const std::string &Constraint, 6084 MVT::ValueType VT) const { 6085 if (Constraint.size() == 1) { 6086 // FIXME: not handling fp-stack yet! 6087 switch (Constraint[0]) { // GCC X86 Constraint Letters 6088 default: break; // Unknown constraint letter 6089 case 'A': // EAX/EDX 6090 if (VT == MVT::i32 || VT == MVT::i64) 6091 return make_vector<unsigned>(X86::EAX, X86::EDX, 0); 6092 break; 6093 case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode) 6094 case 'Q': // Q_REGS 6095 if (VT == MVT::i32) 6096 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0); 6097 else if (VT == MVT::i16) 6098 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0); 6099 else if (VT == MVT::i8) 6100 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0); 6101 else if (VT == MVT::i64) 6102 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0); 6103 break; 6104 } 6105 } 6106 6107 return std::vector<unsigned>(); 6108} 6109 6110std::pair<unsigned, const TargetRegisterClass*> 6111X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 6112 MVT::ValueType VT) const { 6113 // First, see if this is a constraint that directly corresponds to an LLVM 6114 // register class. 6115 if (Constraint.size() == 1) { 6116 // GCC Constraint Letters 6117 switch (Constraint[0]) { 6118 default: break; 6119 case 'r': // GENERAL_REGS 6120 case 'R': // LEGACY_REGS 6121 case 'l': // INDEX_REGS 6122 if (VT == MVT::i64 && Subtarget->is64Bit()) 6123 return std::make_pair(0U, X86::GR64RegisterClass); 6124 if (VT == MVT::i32) 6125 return std::make_pair(0U, X86::GR32RegisterClass); 6126 else if (VT == MVT::i16) 6127 return std::make_pair(0U, X86::GR16RegisterClass); 6128 else if (VT == MVT::i8) 6129 return std::make_pair(0U, X86::GR8RegisterClass); 6130 break; 6131 case 'y': // MMX_REGS if MMX allowed. 6132 if (!Subtarget->hasMMX()) break; 6133 return std::make_pair(0U, X86::VR64RegisterClass); 6134 break; 6135 case 'Y': // SSE_REGS if SSE2 allowed 6136 if (!Subtarget->hasSSE2()) break; 6137 // FALL THROUGH. 6138 case 'x': // SSE_REGS if SSE1 allowed 6139 if (!Subtarget->hasSSE1()) break; 6140 6141 switch (VT) { 6142 default: break; 6143 // Scalar SSE types. 6144 case MVT::f32: 6145 case MVT::i32: 6146 return std::make_pair(0U, X86::FR32RegisterClass); 6147 case MVT::f64: 6148 case MVT::i64: 6149 return std::make_pair(0U, X86::FR64RegisterClass); 6150 // Vector types. 6151 case MVT::v16i8: 6152 case MVT::v8i16: 6153 case MVT::v4i32: 6154 case MVT::v2i64: 6155 case MVT::v4f32: 6156 case MVT::v2f64: 6157 return std::make_pair(0U, X86::VR128RegisterClass); 6158 } 6159 break; 6160 } 6161 } 6162 6163 // Use the default implementation in TargetLowering to convert the register 6164 // constraint into a member of a register class. 6165 std::pair<unsigned, const TargetRegisterClass*> Res; 6166 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6167 6168 // Not found as a standard register? 6169 if (Res.second == 0) { 6170 // GCC calls "st(0)" just plain "st". 6171 if (StringsEqualNoCase("{st}", Constraint)) { 6172 Res.first = X86::ST0; 6173 Res.second = X86::RFP80RegisterClass; 6174 } 6175 6176 return Res; 6177 } 6178 6179 // Otherwise, check to see if this is a register class of the wrong value 6180 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 6181 // turn into {ax},{dx}. 6182 if (Res.second->hasType(VT)) 6183 return Res; // Correct type already, nothing to do. 6184 6185 // All of the single-register GCC register classes map their values onto 6186 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 6187 // really want an 8-bit or 32-bit register, map to the appropriate register 6188 // class and return the appropriate register. 6189 if (Res.second != X86::GR16RegisterClass) 6190 return Res; 6191 6192 if (VT == MVT::i8) { 6193 unsigned DestReg = 0; 6194 switch (Res.first) { 6195 default: break; 6196 case X86::AX: DestReg = X86::AL; break; 6197 case X86::DX: DestReg = X86::DL; break; 6198 case X86::CX: DestReg = X86::CL; break; 6199 case X86::BX: DestReg = X86::BL; break; 6200 } 6201 if (DestReg) { 6202 Res.first = DestReg; 6203 Res.second = Res.second = X86::GR8RegisterClass; 6204 } 6205 } else if (VT == MVT::i32) { 6206 unsigned DestReg = 0; 6207 switch (Res.first) { 6208 default: break; 6209 case X86::AX: DestReg = X86::EAX; break; 6210 case X86::DX: DestReg = X86::EDX; break; 6211 case X86::CX: DestReg = X86::ECX; break; 6212 case X86::BX: DestReg = X86::EBX; break; 6213 case X86::SI: DestReg = X86::ESI; break; 6214 case X86::DI: DestReg = X86::EDI; break; 6215 case X86::BP: DestReg = X86::EBP; break; 6216 case X86::SP: DestReg = X86::ESP; break; 6217 } 6218 if (DestReg) { 6219 Res.first = DestReg; 6220 Res.second = Res.second = X86::GR32RegisterClass; 6221 } 6222 } else if (VT == MVT::i64) { 6223 unsigned DestReg = 0; 6224 switch (Res.first) { 6225 default: break; 6226 case X86::AX: DestReg = X86::RAX; break; 6227 case X86::DX: DestReg = X86::RDX; break; 6228 case X86::CX: DestReg = X86::RCX; break; 6229 case X86::BX: DestReg = X86::RBX; break; 6230 case X86::SI: DestReg = X86::RSI; break; 6231 case X86::DI: DestReg = X86::RDI; break; 6232 case X86::BP: DestReg = X86::RBP; break; 6233 case X86::SP: DestReg = X86::RSP; break; 6234 } 6235 if (DestReg) { 6236 Res.first = DestReg; 6237 Res.second = Res.second = X86::GR64RegisterClass; 6238 } 6239 } 6240 6241 return Res; 6242} 6243